/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bitops.h>
#include <asm/bug.h>
#include <asm/processor.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

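/*
 * zero_page_mask covers the power-of-two sized block of zero pages that
 * starts at zero_pfn. Note the unsigned subtraction below: for a pfn
 * smaller than zero_pfn it wraps around to a huge value and fails the
 * <= comparison, so a single compare checks both ends of the range.
 */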
#define is_zero_pfn	is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

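/*
 * Resulting mapping sizes: a pte maps a 4KB page, a pmd entry maps
 * 1UL << 20 = 1MB. On 64 bit a pud entry maps 1UL << 31 = 2GB and a
 * pgd entry maps 1UL << 42 = 4TB of virtual address space.
 */
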
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * On S390 segment-table entries are combined into the pgd; each
 * page table holds 256 ptes (see PTRS_PER_PTE below).
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048
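
/*
 * Sanity check on the numbers above: the full pgd covers
 * PTRS_PER_PGD * PGDIR_SIZE bytes, i.e. 2048 * 1MB = 2^31 bytes
 * on 31 bit and 2048 * 4TB = 2^53 bytes on 64 bit.
 */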

#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

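/*
 * For orientation (assuming VMALLOC_START is set to VMALLOC_END minus
 * VMALLOC_SIZE at boot): 96UL << 20 == 0x06000000, so the 31 bit vmalloc
 * area would span 0x78000000 - 0x7e000000; 128UL << 30 == 0x2000000000,
 * giving 0x3c000000000 - 0x3e000000000 on 64 bit.
 */
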
/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)

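/*
 * Worked example, assuming a 64 byte struct page on 64 bit: the map area
 * VMEM_MAP_END - VMALLOC_END = 0x2000000000 (128GB) holds 2^31 struct
 * pages, which with 4KB pages would describe up to 8TB of memory before
 * the VMEM_MAX_PFN clamp and the 16MB rounding are applied.
 */
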
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |             PFRA                                   |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SPECIAL	0x004		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL)

/* Eight different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000
#define _PAGE_TYPE_EX_RO	0x202
#define _PAGE_TYPE_EX_RW	0x002
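
/*
 * The type values above decompose into the hardware and software bits,
 * e.g. _PAGE_TYPE_NONE == _PAGE_INVALID | _PAGE_SWT (0x401),
 * _PAGE_TYPE_SWAP == _PAGE_INVALID | _PAGE_SWX | _PAGE_SWT (0x403) and
 * _PAGE_TYPE_FILE == _PAGE_INVALID | _PAGE_RO | _PAGE_SWT (0x601).
 */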

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV  */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses ipte to invalidate a
 * given pte. ipte sets the hw invalid bit and clears all tlbs for the page.
 * The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 * _PAGE_TYPE_EX_RO	0110   ->   1110
 * _PAGE_TYPE_EX_RW	0010   ->   1010
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
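
/*
 * In the table above i, r, x and t stand for the _PAGE_INVALID (0x400),
 * _PAGE_RO (0x200), _PAGE_SWX (0x002) and _PAGE_SWT (0x001) bits, so
 * e.g. _PAGE_TYPE_RO = 0x200 reads as irxt = 0100.
 */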

/* Page status table bits for virtualization */
#define RCP_PCL_BIT	55
#define RCP_HR_BIT	54
#define RCP_HC_BIT	53
#define RCP_GR_BIT	50
#define RCP_GC_BIT	49

/* User dirty bit for KVM's migration feature */
#define KVM_UD_BIT	47

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/* Bits in the storage key */
#define _PAGE_CHANGED	0x02	/* HW changed bit		    */
#define _PAGE_REFERENCED 0x04	/* HW referenced bit		    */

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)
#define PAGE_EX_RO	__pgprot(_PAGE_TYPE_EX_RO)
#define PAGE_EX_RW	__pgprot(_PAGE_TYPE_EX_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * Depending on the EXEC_PROTECT option s390 can do execute protection.
 * Write permission always implies read permission. In theory with a
 * primary/secondary page table execute only can be implemented but
 * it would cost an additional bit in the pte to distinguish all the
 * different pte types. To avoid that execute permission currently
 * implies read permission as well.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_EX_RO
#define __P101	PAGE_EX_RO
#define __P110	PAGE_EX_RO
#define __P111	PAGE_EX_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_EX_RO
#define __S101	PAGE_EX_RO
#define __S110	PAGE_EX_RW
#define __S111	PAGE_EX_RW
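
/*
 * Example: a private PROT_READ|PROT_WRITE mapping uses __P011 == PAGE_RO
 * (kept write protected for copy-on-write), while a shared one uses
 * __S011 == PAGE_RW; the index bits are x, w and r as noted above.
 */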

#ifndef __s390x__
# define PxD_SHADOW_SHIFT	1
#else /* __s390x__ */
# define PxD_SHADOW_SHIFT	2
#endif /* __s390x__ */

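/*
 * Page tables may have a shadow copy for the execute-protection
 * handling. get_shadow_table() relies on the allocation layout:
 * tables live in groups aligned to PAGE_SIZE << PxD_SHADOW_SHIFT,
 * page->index of the group's first page holds the address of the
 * shadow group (or 0 if there is none), and the offset within the
 * group is the same for the original table and its shadow.
 */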
static inline void *get_shadow_table(void *table)
{
	unsigned long addr, offset;
	struct page *page;

	addr = (unsigned long) table;
	offset = addr & ((PAGE_SIZE << PxD_SHADOW_SHIFT) - 1);
	page = virt_to_page((void *)(addr ^ offset));
	return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	*ptep = entry;
	if (mm->context.noexec) {
		if (!(pte_val(entry) & _PAGE_INVALID) &&
		    (pte_val(entry) & _PAGE_SWX))
			pte_val(entry) |= _PAGE_RO;
		else
			pte_val(entry) = _PAGE_TYPE_EMPTY;
		ptep[PTRS_PER_PTE] = entry;
	}
}
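
/*
 * For a noexec mm the shadow page table at ptep[PTRS_PER_PTE] thus gets
 * a read-only copy of every executable pte (_PAGE_SWX set) and an empty
 * entry for everything else. E.g. set_pte_at(mm, addr, ptep,
 * mk_pte(page, PAGE_EX_RW)) would install a _PAGE_RO copy in the shadow.
 */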

/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(a,b)  (pte_val(a) == pte_val(b))

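/*
 * The page status table entry (pgste) for a pte lives in the second
 * half of the page table, at ptep + PTRS_PER_PTE. RCP_PCL_BIT acts as
 * a simple bit spinlock serializing updates of the pgste.
 */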
static inline void rcp_lock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	preempt_disable();
	while (test_and_set_bit(RCP_PCL_BIT, pgste))
		;
#endif
}

static inline void rcp_unlock(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	clear_bit(RCP_PCL_BIT, pgste);
	preempt_enable();
#endif
}

/* forward declaration for SetPageUptodate in page-flags.h */
static inline void page_clear_dirty(struct page *page, int mapped);
#include <linux/page-flags.h>

static inline void ptep_rcp_copy(pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	struct page *page = virt_to_page(pte_val(*ptep));
	unsigned int skey;
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (skey & _PAGE_REFERENCED)
		set_bit_simple(RCP_GR_BIT, pgste);
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
		SetPageReferenced(page);
#endif
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
	/* A pte is neither clean nor dirty on s/390. The dirty bit
	 * is in the storage key. See page_test_and_clear_dirty for
	 * details.
	 */
	return 0;
}

static inline int pte_young(pte_t pte)
{
	/* A pte is neither young nor old on s/390. The young bit
	 * is in the storage key. See page_test_and_clear_young for
	 * details.
	 */
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

#ifndef __s390x__

#define pgd_clear(pgd)		do { } while (0)
#define pud_clear(pud)		do { } while (0)

#else /* __s390x__ */

static inline void pgd_clear_kernel(pgd_t * pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pgd_clear(pgd_t * pgd)
{
	pgd_t *shadow = get_shadow_table(pgd);

	pgd_clear_kernel(pgd);
	if (shadow)
		pgd_clear_kernel(shadow);
}

static inline void pud_clear_kernel(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	pud_t *shadow = get_shadow_table(pud);

	pud_clear_kernel(pud);
	if (shadow)
		pud_clear_kernel(shadow);
}

#endif /* __s390x__ */

static inline void pmd_clear_kernel(pmd_t * pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmd)
{
	pmd_t *shadow = get_shadow_table(pmd);

	pmd_clear_kernel(pmd);
	if (shadow)
		pmd_clear_kernel(shadow);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec)
		pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}


/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	/* The only user of pte_mkclean is the fork() code.
	   We must *not* clear the *physical* page dirty bit
	   just because fork() wants to clear the dirty bit in
	   *one* of the page's mappings.  So we just do nothing. */
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	/* We do not explicitly set the dirty bit because the
	 * sske instruction is slow. It is faster to let the
	 * next instruction set the dirty bit.
	 */
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in clearing the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	/* S/390 doesn't keep its dirty/referenced bit in the pte.
	 * There is no point in setting the real referenced bit.
	 */
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

#ifdef CONFIG_PGSTE
/*
 * Get (and clear) the user dirty bit for a PTE.
 */
static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
						     pte_t *ptep)
{
	int dirty;
	unsigned long *pgste;
	struct page *page;
	unsigned int skey;

	if (!mm->context.has_pgste)
		return -EINVAL;
	rcp_lock(ptep);
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	page = virt_to_page(pte_val(*ptep));
	skey = page_get_storage_key(page_to_phys(page));
	if (skey & _PAGE_CHANGED) {
		set_bit_simple(RCP_GC_BIT, pgste);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste)) {
		SetPageDirty(page);
		set_bit_simple(KVM_UD_BIT, pgste);
	}
	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
	if (skey & _PAGE_CHANGED)
		page_clear_dirty(page, 1);
	rcp_unlock(ptep);
	return dirty;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PGSTE
	unsigned long physpage;
	int young;
	unsigned long *pgste;

	if (!vma->vm_mm->context.has_pgste)
		return 0;
	physpage = pte_val(*ptep) & PAGE_MASK;
	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);

	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
	rcp_lock(ptep);
	if (young)
		set_bit_simple(RCP_GR_BIT, pgste);
	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
	rcp_unlock(ptep);
	return young;
#endif
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without it we
	 * can simply return. */
#ifdef CONFIG_PGSTE
	return ptep_test_and_clear_young(vma, address, ptep);
#endif
	return 0;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

static inline void ptep_invalidate(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	if (mm->context.has_pgste) {
		rcp_lock(ptep);
		__ptep_ipte(address, ptep);
		ptep_rcp_copy(ptep);
		pte_val(*ptep) = _PAGE_TYPE_EMPTY;
		rcp_unlock(ptep);
		return;
	}
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
	if (mm->context.noexec) {
		__ptep_ipte(address, ptep + PTRS_PER_PTE);
		pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	(__mm)->context.flush_mm = 1;					\
	if (atomic_read(&(__mm)->context.attach_count) > 1 ||		\
	    (__mm) != current->active_mm)				\
		ptep_invalidate(__mm, __address, __ptep);		\
	else								\
		pte_clear((__mm), (__address), (__ptep));		\
	__pte;								\
})

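/*
 * Illustrative sketch, not part of the original header: roughly the
 * common-code sequence from change_pte_range that the comment above
 * refers to. The helper below is hypothetical and compiled out; only
 * the ordering of the three calls matters.
 */
#if 0
static void change_pte_example(struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep, pgprot_t newprot)
{
	/* 1) clear the pte; on s390 this also flushes the TLB entry */
	pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
	/* 2) install the modified pte */
	set_pte_at(vma->vm_mm, addr, ptep, pte_modify(pte, newprot));
	/* 3) flush the range; a nop on s390, see the comment above */
	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
}
#endif
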
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	ptep_invalidate(vma->vm_mm, address, ptep);
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t pte = *ptep;

	if (full)
		pte_clear(mm, addr, ptep);
	else
		ptep_invalidate(mm, addr, ptep);
	return pte;
}

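/*
 * Illustrative sketch, not part of the original header: how a batched
 * unmap caller in the style of zap_pte_range would pass the full flag;
 * the exact caller and variable names are assumptions, see tlb.h.
 */
#if 0
	pte_t old = ptep_get_and_clear_full(mm, addr, ptep, tlb->fullmm);
#endif
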
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define ptep_set_wrprotect(__mm, __addr, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	if (pte_write(__pte)) {						\
		(__mm)->context.flush_mm = 1;				\
		if (atomic_read(&(__mm)->context.attach_count) > 1 ||	\
		    (__mm) != current->active_mm)			\
			ptep_invalidate(__mm, __addr, __ptep);		\
		set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte));	\
	}								\
})

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty)	\
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		ptep_invalidate((__vma)->vm_mm, __addr, __ptep);	\
		set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);	\
	}								\
	__changed;							\
})

/*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
 * race against modification of the referenced bit. This function
 * should therefore only be called if the page is not mapped in any
 * address space.
 */
#define __HAVE_ARCH_PAGE_TEST_DIRTY
static inline int page_test_dirty(struct page *page)
{
	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
}

#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
static inline void page_clear_dirty(struct page *page, int mapped)
{
	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
}

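/*
 * Illustrative sketch, not part of the original header: the safe call
 * pattern implied by the race note above; the surrounding reclaim
 * context is an assumption.
 */
#if 0
	/* page must be unmapped from all address spaces at this point */
	if (page_test_dirty(page)) {
		page_clear_dirty(page, 0);
		set_page_dirty(page);	/* carry the bit over to struct page */
	}
#endif
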
/*
 * Test and clear referenced bit in storage key.
 */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
static inline int page_test_and_clear_young(struct page *page)
{
	unsigned long physpage = page_to_phys(page);
	int ccode;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode) : "a" (physpage) : "cc" );
	return ccode & 2;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

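/*
 * Illustrative sketch, not part of the original header: building and
 * installing a pte for a page; PAGE_KERNEL and the surrounding
 * mm/addr/ptep context are assumptions.
 */
#if 0
	pte_t pte = mk_pte(page, PAGE_KERNEL);
	set_pte_at(mm, addr, ptep, pte);
#endif
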
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	/* A region-second entry means another page table level is in
	 * use; otherwise the pud is folded into the pgd. */
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	/* Same folding logic one level down. */
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

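/*
 * Illustrative sketch, not part of the original header: a full walk
 * from a kernel virtual address to its pte using the helpers above;
 * the pgd_none/pud_none/pmd_none checks real code needs are omitted.
 */
#if 0
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);
	pte_t *pte = pte_offset_kernel(pmd, address);
#endif
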
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                       |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

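/*
 * Illustrative sketch, not part of the original header: a round trip
 * through the swap entry encoding above; the values are arbitrary.
 */
#if 0
	swp_entry_t entry = __swp_entry(3, 0x1234);
	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
#endif
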
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */