| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* | 
|  | 2 | * This file is subject to the terms and conditions of the GNU General Public | 
|  | 3 | * License.  See the file "COPYING" in the main directory of this archive | 
|  | 4 | * for more details. | 
|  | 5 | * | 
|  | 6 | * Copyright (C) 1994 - 2002 by Ralf Baechle | 
|  | 7 | * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. | 
|  | 8 | * Copyright (C) 2002  Maciej W. Rozycki | 
|  | 9 | */ | 
|  | 10 | #ifndef _ASM_PGTABLE_BITS_H | 
|  | 11 | #define _ASM_PGTABLE_BITS_H | 
|  | 12 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 |  | 
|  | 14 | /* | 
|  | 15 | * Note that we shift the lower 32bits of each EntryLo[01] entry | 
|  | 16 | * 6 bits to the left. That way we can convert the PFN into the | 
|  | 17 | * physical address by a single 'and' operation and gain 6 additional | 
|  | 18 | * bits for storing information which isn't present in a normal | 
|  | 19 | * MIPS page table. | 
|  | 20 | * | 
|  | 21 | * Similar to the Alpha port, we need to keep track of the ref | 
|  | 22 | * and mod bits in software.  We have a software "yeah you can read | 
|  | 23 | * from this page" bit, and a hardware one which actually lets the | 
 * process read from the page.	By the same token we have a software
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | * writable bit and the real hardware one which actually lets the | 
|  | 26 | * process write to the page, this keeps a mod bit via the hardware | 
|  | 27 | * dirty bit. | 
|  | 28 | * | 
|  | 29 | * Certain revisions of the R4000 and R5000 have a bug where if a | 
|  | 30 | * certain sequence occurs in the last 3 instructions of an executable | 
|  | 31 | * page, and the following page is not mapped, the cpu can do | 
|  | 32 | * unpredictable things.  The code (when it is written) to deal with | 
|  | 33 | * this problem will be in the update_mmu_cache() code for the r4k. | 
|  | 34 | */ | 
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are implemented by the TLB hardware
 *
 * With 64-bit physical addresses on a 32-bit CPU the hardware bits sit
 * at the bottom of the (shifted) EntryLo value: G, V, D, then the
 * 3-bit cache coherency attribute (CCA) field.
 */
#define _PAGE_GLOBAL_SHIFT	0
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
/* CCA field is 3 bits wide, hence the 7 (0b111) mask. */
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 *
 * Software-only bits are packed immediately above the 3-bit CCA field.
 */
#define _PAGE_PRESENT_SHIFT	(_CACHE_SHIFT + 3)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

/* PFN starts above the hardware bits (CCA top is _CACHE_SHIFT + 3). */
#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following bits are implemented in software
 *
 * On R3000-class CPUs the software bits occupy the low bits and the
 * hardware TLB bits live higher up in the PTE.
 */
#define _PAGE_PRESENT_SHIFT	(0)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

/*
 * The following bits are implemented by the TLB hardware
 */
/*
 * NOTE(review): the "+ 4" leaves a 3-bit gap above _PAGE_MODIFIED —
 * presumably to line the hardware bits up with the R3000 EntryLo
 * layout; confirm against the R3000 manual.
 */
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
/* R3000 has a single uncached bit rather than a 3-bit CCA field. */
#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK		_CACHE_UNCACHED

#define _PFN_SHIFT		PAGE_SHIFT

#else
/*
 * Below are the "Normal" R4K cases
 */

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	0
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#ifdef CONFIG_CPU_MIPSR2
/* On R2+ _PAGE_READ is derived from RI below, so WRITE follows PRESENT. */
#define _PAGE_WRITE_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#else
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#endif
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
/* Huge TLB page */
#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)

/* Only R2 or newer cores have the XI bit */
#ifdef CONFIG_CPU_MIPSR2
/* Only the shift here; _PAGE_NO_EXEC itself is defined further down. */
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#endif	/* CONFIG_CPU_MIPSR2 */

#endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_CPU_MIPSR2
/* XI - page cannot be executed */
/* Shift not already set by the huge-page block above. */
#ifndef _PAGE_NO_EXEC_SHIFT
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
/* Bit is only meaningful when the CPU actually implements RI/XI. */
#define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)

/* RI - page cannot be read */
/* Without RI/XI the same bit position acts as the software READ bit. */
#define _PAGE_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
#define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)

#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)

#else	/* !CONFIG_CPU_MIPSR2 */
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#endif	/* CONFIG_CPU_MIPSR2 */

#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
/* 3-bit cache coherency attribute field. */
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
| Chris Dearman | bec5052 | 2007-09-19 00:51:57 +0100 | [diff] [blame] | 169 |  | 
/* Cores without RI/XI support get no-op definitions for these bits. */
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ		0
#endif

/*
 * A "silent" read/write is one the TLB hardware allows directly
 * (V / D bit set) without taking a fault for software bookkeeping.
 */
#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

/* Mask selecting the PFN field: every bit at or above _PFN_SHIFT. */
#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:     CCC D V G M A W R P
 *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
 */
|  | 191 |  | 
|  | 192 | #ifndef __ASSEMBLY__ | 
|  | 193 | /* | 
|  | 194 | * pte_to_entrylo converts a page table entry (PTE) into a Mips | 
|  | 195 | * entrylo0/1 value. | 
|  | 196 | */ | 
|  | 197 | static inline uint64_t pte_to_entrylo(unsigned long pte_val) | 
|  | 198 | { | 
| Steven J. Hill | be0c37c | 2015-02-26 18:16:37 -0600 | [diff] [blame^] | 199 | #ifdef CONFIG_CPU_MIPSR2 | 
| Steven J. Hill | 05857c6 | 2012-09-13 16:51:46 -0500 | [diff] [blame] | 200 | if (cpu_has_rixi) { | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 201 | int sa; | 
|  | 202 | #ifdef CONFIG_32BIT | 
|  | 203 | sa = 31 - _PAGE_NO_READ_SHIFT; | 
|  | 204 | #else | 
|  | 205 | sa = 63 - _PAGE_NO_READ_SHIFT; | 
|  | 206 | #endif | 
|  | 207 | /* | 
|  | 208 | * C has no way to express that this is a DSRL | 
|  | 209 | * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily | 
|  | 210 | * in the fast path this is done in assembly | 
|  | 211 | */ | 
|  | 212 | return (pte_val >> _PAGE_GLOBAL_SHIFT) | | 
|  | 213 | ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa); | 
|  | 214 | } | 
| Steven J. Hill | be0c37c | 2015-02-26 18:16:37 -0600 | [diff] [blame^] | 215 | #endif | 
| David Daney | 6dd9344 | 2010-02-10 15:12:47 -0800 | [diff] [blame] | 216 |  | 
|  | 217 | return pte_val >> _PAGE_GLOBAL_SHIFT; | 
|  | 218 | } | 
|  | 219 | #endif | 
| Chris Dearman | bec5052 | 2007-09-19 00:51:57 +0100 | [diff] [blame] | 220 |  | 
/*
 * Cache attributes
 *
 * Platform-specific overrides first; any CCA value not overridden
 * falls back to the standard MIPS encodings further down.
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* R3000 only has the single uncached bit, so cached == 0. */
#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */

#elif defined(CONFIG_MACH_JZ4740)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

/*
 * Default CCA encodings (values 0-7 in the 3-bit cache field), used
 * unless a platform above supplied its own definition.
 */
#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 274 |  | 
/*
 * Combined masks: hardware permission bit plus the matching software
 * read/write and referenced/modified bookkeeping bits.
 */
#define __READABLE	(_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

/*
 * NOTE(review): presumably the bits a pte_modify()-style protection
 * change must preserve (PFN, cache attributes, accessed/modified) —
 * confirm against the pgtable.h user of this mask.
 */
#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PFN_MASK | _CACHE_MASK)
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 281 | #endif /* _ASM_PGTABLE_BITS_H */ |