/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

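/*
 * Illustration (derived from the definitions above, not new code):
 * ZERO_PAGE(vaddr) selects the zero page whose cache color matches
 * the faulting address, e.g. for a read fault on an anonymous
 * mapping:
 *
 *	struct page *zpage = ZERO_PAGE(address);
 *	unsigned long zpfn = my_zero_pfn(address);
 *
 * is_zero_pfn() therefore has to accept the whole pfn range
 * [zero_pfn, zero_pfn + (zero_page_mask >> PAGE_SHIFT)]; the
 * unsigned subtraction makes pfns below zero_pfn wrap around and
 * fail the comparison.
 */
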
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

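/*
 * Resulting mapping sizes (derived from the shifts above): a pmd
 * entry covers 1UL << 20 = 1MB on both 31 and 64 bit; on 64 bit a
 * pud entry covers 2GB (1UL << 31) and a pgd entry 4TB (1UL << 42),
 * while on 31 bit pud and pgd are folded onto the 1MB segment level.
 */
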
/*
 * Entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * Each pgd (segment-table) entry points to a page table with
 * 256 ptes, matching PTRS_PER_PTE below.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

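/*
 * Sketch of the calculation above: the virtual memmap sits between
 * VMALLOC_END and VMEM_MAP_END, so it can describe at most
 * (VMEM_MAP_END - VMALLOC_END) / sizeof(struct page) pages; the pfn
 * limit is additionally capped by VMALLOC_START, and the final byte
 * address is rounded down to a 16MB boundary.
 */
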
/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |            PFRA                             |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV  */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO   */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ptep_invalidate to
 * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

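/*
 * Worked example (derived from the bit definitions above): the type
 * bits named "irxt" are _PAGE_INVALID (i), _PAGE_RO (r), _PAGE_SWX (x)
 * and _PAGE_SWT (t). _PAGE_TYPE_SWAP = 0x403 = _PAGE_INVALID |
 * _PAGE_SWX | _PAGE_SWT, i.e. irxt = 1011, matching the table.
 */
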
/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED	0x02	/* HW changed bit		*/
#define _PAGE_REFERENCED 0x04	/* HW referenced bit		*/

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only could be implemented, but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that, execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW

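/*
 * Illustration (generic mm behaviour): a private PROT_READ|PROT_WRITE
 * mapping uses __P011, which is PAGE_RO here so that the first write
 * fault can trigger copy-on-write; the shared variant __S011 maps
 * straight to PAGE_RW.
 */
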
#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

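/*
 * Sketch of the shadow lookup above (mechanism inferred from the
 * code): tables for the noexec feature come in primary/shadow pairs
 * and the shadow's address is kept in page->index of the page backing
 * the primary table. Clearing the low bits finds that page; or'ing
 * the offset back in addresses the corresponding entry within the
 * shadow table.
 */
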
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		/* Keep the shadow table in sync: only valid ptes
		 * marked executable (_PAGE_SWX) stay visible there,
		 * as read-only copies; all others are set to empty. */
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}

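/*
 * Illustrative call (generic fault path, simplified): after
 * allocating a page, a fault handler would install it with
 *
 *	set_pte_at(vma->vm_mm, address, ptep,
 *		   mk_pte(page, vma->vm_page_prot));
 *
 * which keeps the noexec shadow table coherent as a side effect.
 */
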
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

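/*
 * Typical use of the pgste helpers above (a sketch): callers bracket
 * any transfer of referenced/changed state with the pgste lock, e.g.
 *
 *	rcp_lock(ptep);
 *	... read the storage key, update the RCP_* / KVM_UD bits ...
 *	rcp_unlock(ptep);
 *
 * as done by ptep_invalidate and the KVM dirty tracking below.
 */
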
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

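/*
 * Consequence of the stubs above (informational): generic code that
 * asks whether a pte is dirty or young always gets 0 on s390; the
 * real state lives in the per-page storage key and is harvested via
 * page_test_and_clear_dirty and ptep_test_and_clear_young instead.
 */
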
/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings. So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 reference bits are in the storage key and never in the
	 * TLB. With virtualization we handle the reference bit, without
	 * it we can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the page table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

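/*
 * Usage sketch for the sequence described above (simplified generic
 * code, e.g. change_pte_range):
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep); // flushes the TLB
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);		// nop on s390
 *
 * so the flush happens at step 1 instead of step 3.
 */
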
| 909 | #define __HAVE_ARCH_PTEP_CLEAR_FLUSH |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 910 | static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, |
| 911 | unsigned long address, pte_t *ptep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 912 | { |
| 913 | pte_t pte = *ptep; |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 914 | ptep_invalidate(vma->vm_mm, address, ptep); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 915 | return pte; |
| 916 | } |
| 917 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 918 | /* |
| 919 | * The batched pte unmap code uses ptep_get_and_clear_full to clear the |
| 920 | * ptes. Here an optimization is possible. tlb_gather_mmu flushes all |
| 921 | * tlbs of an mm if it can guarantee that the ptes of the mm_struct |
| 922 | * cannot be accessed while the batched unmap is running. In this case |
| 923 | * full==1 and a simple pte_clear is enough. See tlb.h. |
| 924 | */ |
| 925 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
| 926 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
| 927 | unsigned long addr, |
| 928 | pte_t *ptep, int full) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | { |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 930 | pte_t pte = *ptep; |
| 931 | |
| 932 | if (full) |
| 933 | pte_clear(mm, addr, ptep); |
| 934 | else |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 935 | ptep_invalidate(mm, addr, ptep); |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 936 | return pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 937 | } |
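/*
 * Illustrative sketch, not part of the original header: the batched
 * unmap loop described above. The helper name __example_zap_range is
 * hypothetical. With fullmm == 1 the caller (tlb_gather_mmu) will
 * flush all TLBs of the mm afterwards, so the cheap pte_clear() path
 * is taken; otherwise every pte is invalidated individually.
 */
static inline void __example_zap_range(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep,
				       int nr, int fullmm)
{
	int i;

	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
		(void) ptep_get_and_clear_full(mm, addr, ptep, fullmm);
}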
| 938 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 939 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
| 940 | #define ptep_set_wrprotect(__mm, __addr, __ptep) \ |
| 941 | ({ \ |
| 942 | pte_t __pte = *(__ptep); \ |
| 943 | if (pte_write(__pte)) { \ |
Martin Schwidefsky | 050eef3 | 2010-08-24 09:26:21 +0200 | [diff] [blame] | 944 | (__mm)->context.flush_mm = 1; \ |
| 945 | if (atomic_read(&(__mm)->context.attach_count) > 1 || \ |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 946 | (__mm) != current->active_mm) \ |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 947 | ptep_invalidate(__mm, __addr, __ptep); \ |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 948 | set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \ |
| 949 | } \ |
| 950 | }) |
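/*
 * Illustrative sketch, not part of the original header: write
 * protecting a range of ptes, as fork-time COW setup does. The helper
 * name __example_wrprotect_range is hypothetical.
 */
static inline void __example_wrprotect_range(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, int nr)
{
	int i;

	for (i = 0; i < nr; i++, addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(mm, addr, ptep);
}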
| 951 | |
| 952 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 953 | #define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ |
| 954 | ({ \ |
| 955 | int __changed = !pte_same(*(__ptep), __entry); \ |
| 956 | if (__changed) { \ |
Martin Schwidefsky | 146e4b3 | 2008-02-09 18:24:35 +0100 | [diff] [blame] | 957 | ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \ |
Martin Schwidefsky | f0e47c2 | 2007-07-17 04:03:03 -0700 | [diff] [blame] | 958 | set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ |
| 959 | } \ |
| 960 | __changed; \ |
Benjamin Herrenschmidt | 8dab524 | 2007-06-16 10:16:12 -0700 | [diff] [blame] | 961 | }) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | |
| 963 | /* |
| 964 | * Test and clear dirty bit in storage key. |
| 965 | * We can't clear the changed bit atomically, so clearing it is a |
| 966 | * potential race against concurrent modification of the referenced |
| 967 | * bit. These functions should therefore only be called for a page |
| 968 | * that is not mapped in any address space. |
| 969 | */ |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 970 | #define __HAVE_ARCH_PAGE_TEST_DIRTY |
Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 971 | static inline int page_test_dirty(struct page *page) |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 972 | { |
Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 973 | return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0; |
| 974 | } |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 975 | |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 976 | #define __HAVE_ARCH_PAGE_CLEAR_DIRTY |
Martin Schwidefsky | 6c21048 | 2007-04-27 16:01:57 +0200 | [diff] [blame] | 977 | static inline void page_clear_dirty(struct page *page) |
| 978 | { |
| 979 | page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY); |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 980 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | |
| 982 | /* |
| 983 | * Test and clear referenced bit in storage key. |
| 984 | */ |
Martin Schwidefsky | ba8a922 | 2007-10-22 12:52:44 +0200 | [diff] [blame] | 985 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 986 | static inline int page_test_and_clear_young(struct page *page) |
| 987 | { |
Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 988 | unsigned long physpage = page_to_phys(page); |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 989 | int ccode; |
| 990 | |
Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 991 | asm volatile( |
| 992 | " rrbe 0,%1\n" |
| 993 | " ipm %0\n" |
| 994 | " srl %0,28\n" |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 995 | : "=d" (ccode) : "a" (physpage) : "cc" ); |
| 996 | return ccode & 2; |
| 997 | } |
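/*
 * Illustrative sketch, not part of the original header: the intended
 * test-then-clear use of the storage-key dirty helpers above. The
 * helper name __example_test_and_clear_dirty is hypothetical. As the
 * comment above page_test_dirty() warns, the page must not be mapped
 * in any address space while this runs, because the clear is not
 * atomic.
 */
static inline int __example_test_and_clear_dirty(struct page *page)
{
	int dirty = page_test_dirty(page);

	if (dirty)
		page_clear_dirty(page);
	return dirty;
}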
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | |
| 999 | /* |
| 1000 | * Conversion functions: convert a page and protection to a page entry, |
| 1001 | * and a page entry and page directory to the page they refer to. |
| 1002 | */ |
| 1003 | static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) |
| 1004 | { |
| 1005 | pte_t __pte; |
| 1006 | pte_val(__pte) = physpage + pgprot_val(pgprot); |
| 1007 | return __pte; |
| 1008 | } |
| 1009 | |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1010 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
| 1011 | { |
Heiko Carstens | 0b2b6e1 | 2006-10-04 20:02:23 +0200 | [diff] [blame] | 1012 | unsigned long physpage = page_to_phys(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | |
Heiko Carstens | 2dcea57 | 2006-09-29 01:58:41 -0700 | [diff] [blame] | 1014 | return mk_pte_phys(physpage, pgprot); |
| 1015 | } |
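/*
 * Illustrative sketch, not part of the original header: building a pte
 * for a page with kernel default protection. The helper name
 * __example_mk_kernel_pte is hypothetical; PAGE_KERNEL is assumed to
 * be one of the pgprot definitions earlier in this file.
 */
static inline pte_t __example_mk_kernel_pte(struct page *page)
{
	return mk_pte(page, PAGE_KERNEL);
}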
| 1016 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1018 | #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) |
| 1019 | #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) |
| 1020 | #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1022 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) |
| 1024 | |
| 1025 | #ifndef __s390x__ |
| 1026 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1027 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
| 1028 | #define pud_deref(pud) ({ BUG(); 0UL; }) |
| 1029 | #define pgd_deref(pgd) ({ BUG(); 0UL; }) |
| 1030 | |
| 1031 | #define pud_offset(pgd, address) ((pud_t *) pgd) |
| 1032 | #define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | |
| 1034 | #else /* __s390x__ */ |
| 1035 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1036 | #define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) |
| 1037 | #define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1038 | #define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1039 | |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1040 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) |
| 1041 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1042 | pud_t *pud = (pud_t *) pgd; |
| 1043 | if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) |
| 1044 | pud = (pud_t *) pgd_deref(*pgd); |
Martin Schwidefsky | 5a216a2 | 2008-02-09 18:24:36 +0100 | [diff] [blame] | 1045 | return pud + pud_index(address); |
| 1046 | } |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1047 | |
| 1048 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) |
| 1049 | { |
Martin Schwidefsky | 6252d70 | 2008-02-09 18:24:37 +0100 | [diff] [blame] | 1050 | pmd_t *pmd = (pmd_t *) pud; |
| 1051 | if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) |
| 1052 | pmd = (pmd_t *) pud_deref(*pud); |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1053 | return pmd + pmd_index(address); |
| 1054 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | |
| 1056 | #endif /* __s390x__ */ |
| 1057 | |
Martin Schwidefsky | 190a1d7 | 2007-10-22 12:52:48 +0200 | [diff] [blame] | 1058 | #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) |
| 1059 | #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) |
| 1060 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
| 1061 | |
| 1062 | #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT) |
| 1063 | |
| 1064 | /* Find an entry in the lowest level page table. */ |
| 1065 | #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr)) |
| 1066 | #define pte_offset_kernel(pmd, address) pte_offset(pmd, address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) |
| 1068 | #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address) |
| 1069 | #define pte_unmap(pte) do { } while (0) |
| 1070 | #define pte_unmap_nested(pte) do { } while (0) |
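/*
 * Illustrative sketch, not part of the original header: a software
 * walk to the pte of an address using the lookup macros above. The
 * helper name __example_walk_to_pte is hypothetical, and the
 * pgd/pud/pmd present checks a real walker needs are omitted for
 * brevity. On 31 bit the pud and pmd levels largely fold away; on
 * 64 bit pud_offset/pmd_offset follow the region and segment tables.
 */
static inline pte_t *__example_walk_to_pte(struct mm_struct *mm,
					   unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}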
| 1071 | |
| 1072 | /* |
| 1073 | * 31 bit swap entry format: |
| 1074 | * A page-table entry has some bits we have to treat in a special way. |
| 1075 | * Bits 0, 20 and 23 have to be zero, otherwise a specification |
| 1076 | * exception will occur instead of a page translation exception. The |
| 1077 | * specification exception has the bad habit of not storing the |
| 1078 | * necessary information in the lowcore. |
| 1079 | * Bit 21 and bit 22 are the page invalid bit and the page protection |
| 1080 | * bit. We set both to indicate a swapped page. |
| 1081 | * Bit 30 and 31 are used to distinguish the different page types. For |
| 1082 | * a swapped page these bits need to be zero. |
| 1083 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. |
| 1084 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 |
| 1085 | * plus bit 24 for the offset. |
| 1086 | * 0| offset |0110|o|type |00| |
| 1087 | * 0 0000000001111111111 2222 2 22222 33 |
| 1088 | * 0 1234567890123456789 0123 4 56789 01 |
| 1089 | * |
| 1090 | * 64 bit swap entry format: |
| 1091 | * A page-table entry has some bits we have to treat in a special way. |
| 1092 | * Bits 52 and 55 have to be zero, otherwise a specification |
| 1093 | * exception will occur instead of a page translation exception. The |
| 1094 | * specification exception has the bad habit of not storing the |
| 1095 | * necessary information in the lowcore. |
| 1096 | * Bit 53 and bit 54 are the page invalid bit and the page protection |
| 1097 | * bit. We set both to indicate a swapped page. |
| 1098 | * Bit 62 and 63 are used to distinguish the different page types. For |
| 1099 | * a swapped page these bits need to be zero. |
| 1100 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. |
| 1101 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 |
| 1102 | * plus bit 56 for the offset. |
| 1103 | * | offset |0110|o|type |00| |
| 1104 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 |
| 1105 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 |
| 1106 | */ |
| 1107 | #ifndef __s390x__ |
| 1108 | #define __SWP_OFFSET_MASK (~0UL >> 12) |
| 1109 | #else |
| 1110 | #define __SWP_OFFSET_MASK (~0UL >> 11) |
| 1111 | #endif |
Adrian Bunk | 4448aaf | 2005-11-08 21:34:42 -0800 | [diff] [blame] | 1112 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1113 | { |
| 1114 | pte_t pte; |
| 1115 | offset &= __SWP_OFFSET_MASK; |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1116 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
| 1118 | return pte; |
| 1119 | } |
| 1120 | |
| 1121 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) |
| 1122 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) |
| 1123 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) |
| 1124 | |
| 1125 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
| 1126 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
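/*
 * Illustrative sketch, not part of the original header: type and
 * offset round-trip through the swap pte layout described in the big
 * comment above. The helper name __example_swap_roundtrip is
 * hypothetical; swp_entry_t comes from linux/mm_types.h.
 */
static inline int __example_swap_roundtrip(unsigned long type,
					   unsigned long offset)
{
	swp_entry_t entry = __swp_entry(type, offset);

	return __swp_type(entry) == (type & 0x1f) &&
	       __swp_offset(entry) == (offset & __SWP_OFFSET_MASK);
}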
| 1127 | |
| 1128 | #ifndef __s390x__ |
| 1129 | # define PTE_FILE_MAX_BITS 26 |
| 1130 | #else /* __s390x__ */ |
| 1131 | # define PTE_FILE_MAX_BITS 59 |
| 1132 | #endif /* __s390x__ */ |
| 1133 | |
| 1134 | #define pte_to_pgoff(__pte) \ |
| 1135 | ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) |
| 1136 | |
| 1137 | #define pgoff_to_pte(__off) \ |
| 1138 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ |
Gerald Schaefer | 9282ed9 | 2006-09-20 15:59:37 +0200 | [diff] [blame] | 1139 | | _PAGE_TYPE_FILE }) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | |
| 1141 | #endif /* !__ASSEMBLY__ */ |
| 1142 | |
| 1143 | #define kern_addr_valid(addr) (1) |
| 1144 | |
Heiko Carstens | 17f3458 | 2008-04-30 13:38:47 +0200 | [diff] [blame] | 1145 | extern int vmem_add_mapping(unsigned long start, unsigned long size); |
| 1146 | extern int vmem_remove_mapping(unsigned long start, unsigned long size); |
Carsten Otte | 402b086 | 2008-03-25 18:47:10 +0100 | [diff] [blame] | 1147 | extern int s390_enable_sie(void); |
Heiko Carstens | f4eb07c | 2006-12-08 15:56:07 +0100 | [diff] [blame] | 1148 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | /* |
| 1150 | * No page table caches to initialise |
| 1151 | */ |
| 1152 | #define pgtable_cache_init() do { } while (0) |
| 1153 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | #include <asm-generic/pgtable.h> |
| 1155 | |
| 1156 | #endif /* _ASM_S390_PGTABLE_H */ |