/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

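/*
 * Editor's sketch, not part of the original header: zero_page_mask
 * supports multiple ("colored") zero pages. ZERO_PAGE() picks one of
 * several consecutive zero pages based on the low address bits, so
 * read-only zero mappings at different virtual addresses do not all
 * compete for the same cache lines. The mask value below is
 * hypothetical.
 */
#if 0
	/* with e.g. four zero pages, zero_page_mask == 3 * PAGE_SIZE */
	struct page *zp0 = ZERO_PAGE(0x00000000UL);	/* zero page 0 */
	struct page *zp1 = ZERO_PAGE(0x00001000UL);	/* zero page 1 */
#endif
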
/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390 each segment-table entry points to one page table
 * with 256 pte entries, and the segment table acts as the pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

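/*
 * Editor's worked example (not in the original source): with the 64 bit
 * values above, one pte maps a 4KB page, one segment (pmd) entry maps
 * 256 * 4KB = 1MB (PMD_SHIFT = 20), one region-third (pud) entry maps
 * 2048 * 1MB = 2GB (PUD_SHIFT = 31), and one region-second (pgd) entry
 * maps 2048 * 2GB = 4TB (PGDIR_SHIFT = 42); the 2048 pgd entries thus
 * cover an 8PB (2^53 byte) address space.
 */
#if 0
	unsigned long bytes_per_pmd = PTRS_PER_PTE * 4096UL;	/* == 1UL << PMD_SHIFT */
	unsigned long bytes_per_pud = PTRS_PER_PMD * PMD_SIZE;	/* == 1UL << PUD_SHIFT */
	unsigned long bytes_per_pgd = PTRS_PER_PUD * PUD_SIZE;	/* == 1UL << PGDIR_SHIFT */
#endif
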
#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and
 * modules. On 64 bit kernels we have a 2GB area at the top of the vmalloc
 * area where modules will reside. That makes sure that inter module
 * branches always happen without trampolines and in addition the placement
 * within a 2GB frame is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    | OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

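/*
 * Editor's sketch: reading the storage key fields documented above from
 * a key byte (illustrative helper, not part of the kernel API).
 */
#if 0
static inline void storage_key_fields(unsigned char skey)
{
	unsigned char acc = (skey & 0xf0) >> 4;	/* ACC: access key, bits 0-3 */
	unsigned char fp  = (skey & 0x08) >> 3;	/* F: fetch protection bit   */
	unsigned char ref = (skey & 0x04) >> 2;	/* R: referenced bit         */
	unsigned char chg = (skey & 0x02) >> 1;	/* C: changed bit            */
}
#endif
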
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all
 * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
 * afterwards. This change is done while holding the lock, but the
 * intermediate step of a previously valid pte with the hw invalid bit set
 * can be observed by handle_pte_fault. That makes it necessary that all
 * valid pte types with the hw invalid bit set must be distinguishable from
 * the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * The swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */

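/*
 * Editor's sketch: the encodings in the table above can be cross-checked
 * against the bit definitions, with i = _PAGE_INVALID (0x400),
 * r = _PAGE_RO (0x200), x = _PAGE_SWX (0x002) and t = _PAGE_SWT (0x001).
 */
#if 0
	BUILD_BUG_ON(_PAGE_TYPE_NONE != (_PAGE_INVALID | _PAGE_SWT));		   /* 1001 */
	BUILD_BUG_ON(_PAGE_TYPE_SWAP != (_PAGE_INVALID | _PAGE_SWX | _PAGE_SWT)); /* 1011 */
	BUILD_BUG_ON(_PAGE_TYPE_RO   != _PAGE_RO);				   /* 0100 */
#endif
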
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL
#define RCP_IN_BIT	0x00008000UL	/* IPTE notify bit */

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL
#define RCP_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

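/*
 * Editor's note (illustrative, not from the original source): these
 * __P/__S entries feed the generic protection map. A private
 * PROT_READ|PROT_WRITE mapping starts out as __P011 == PAGE_RO so the
 * first store faults and can be handled as copy-on-write, while the
 * shared variant __S011 == PAGE_RW maps writable right away.
 */
#if 0
	pgprot_t cow_prot    = __P011;	/* private r/w -> read-only until COW */
	pgprot_t shared_prot = __S011;	/* shared r/w  -> writable */
#endif
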
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd) { return 0; }
static inline int pgd_bad(pgd_t pgd) { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud) { return 0; }
static inline int pud_large(pud_t pud) { return 0; }
static inline int pud_bad(pud_t pud) { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the parent gmap structure
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long vmaddr;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

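/*
 * Editor's sketch of a typical use of the gmap interface declared above
 * by a hypervisor (hypothetical addresses, error handling mostly
 * omitted; real callers such as KVM drive enable/disable per vcpu):
 */
#if 0
static int example_guest_setup(struct mm_struct *mm)
{
	struct gmap *gmap = gmap_alloc(mm);	/* create guest address space */

	if (!gmap)
		return -ENOMEM;
	/* map 16MB of the host mm at guest real address 0 */
	gmap_map_segment(gmap, 0x10000000UL, 0x0UL, 0x1000000UL);
	gmap_enable(gmap);			/* switch to the guest asce */
	/* ... run the guest, resolving faults with gmap_fault() ... */
	gmap_disable(gmap);
	gmap_free(gmap);
	return 0;
}
#endif
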
static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					unsigned long addr,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & RCP_IN_BIT) {
		pgste_val(pgste) &= ~RCP_IN_BIT;
		gmap_do_ipte_notify(mm, addr, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

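/*
 * Editor's sketch (hypothetical caller, not from the original source):
 * establishing a pte through set_pte_at() so that the pgste locking and
 * storage-key logic above is applied:
 */
#if 0
static void example_map_page(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, struct page *page)
{
	pte_t entry = mk_pte(page, PAGE_RW);	/* writable, initially clean */

	set_pte_at(mm, addr, ptep, entry);	/* key + pgste handling done here */
}
#endif
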
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB;
	 * on s390 reference bits are in the storage key and never in the
	 * TLB. With virtualization we handle the reference bit, without
	 * it we can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

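/*
 * Editor's sketch of the sequence described in the comment above, as
 * generic code like change_pte_range() uses it (illustrative fragment
 * only; mm, vma, addr and ptep are assumed to be in scope):
 */
#if 0
	pte_t old = ptep_get_and_clear(mm, addr, ptep);	/* clears pte and TLB */
	pte_t new = pte_wrprotect(old);			/* modify the pte value */

	set_pte_at(mm, addr, ptep, new);		/* install the new pte */
	flush_tlb_range(vma, addr, addr + PAGE_SIZE);	/* a nop on s390 */
#endif
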
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	if (mm_has_pgste(mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001138/*
1139 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
1140 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
1141 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
1142 * cannot be accessed while the batched unmap is running. In this case
1143 * full==1 and a simple pte_clear is enough. See tlb.h.
1144 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		if (!full)
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
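
/*
 * Sketch of a hypothetical batched-unmap caller: with full == 1 the
 * caller guarantees a later flush of all TLBs of the mm, so the
 * per-pte IPTE above is skipped.
 */
static inline pte_t example_batched_clear(struct mm_struct *mm,
					  unsigned long address, pte_t *ptep)
{
	return ptep_get_and_clear_full(mm, address, ptep, 1);
}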

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
		}

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
	}

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	/*
	 * A writable mapping of an already dirty page can be made
	 * dirty and writable right away: set the software changed
	 * bit and drop the hardware protection bit.
	 */
	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) ({ BUG(); 0UL; })
#define pgd_deref(pgd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
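
/*
 * Illustrative walk (hypothetical helper, no pXd validity checks):
 * resolve a kernel address to its pte slot through the folded table
 * levels. Real callers must test each level before dereferencing it.
 */
static inline pte_t *example_pte_slot_of(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud = pud_offset(pgd, address);
	pmd_t *pmd = pmd_offset(pud, address);

	return pte_offset_kernel(pmd, address);
}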

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	/* IDTE needs the origin of the segment table the entry lives in */
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"	/* idte */
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}
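
/*
 * Sketch (hypothetical helper): pmd_modify rebuilds the protection
 * bits of a segment entry, here downgrading it to read-only.
 */
static inline pmd_t example_pmd_downgrade_ro(pmd_t pmd)
{
	return pmd_modify(pmd, PAGE_RO);
}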

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		/*
		 * RRBM resets the reference bits of 64 pages at a time
		 * and returns the old bits as a mask; OR the masks of
		 * all blocks of the huge page together.
		 */
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		/*
		 * Fall back to RRBE, one 4K page at a time; remember
		 * if any page of the huge page was referenced.
		 */
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"	/* reference bit was zero */
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
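
/*
 * Sketch (hypothetical pfn, assumes PAGE_RW as defined earlier in this
 * header): composing a writable transparent huge pmd from the helpers
 * above.
 */
static inline pmd_t example_mk_writable_huge_pmd(unsigned long pfn)
{
	pmd_t pmd = pfn_pmd(pfn, PAGE_RW);

	return pmd_mkwrite(pmd_mkhuge(pmd));
}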
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 21 and 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the bits 1-19 plus
 * bit 24 (20 bits in total) for the offset.
 * 0| offset |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 53 and 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the bits 0-51 plus
 * bit 56 (53 bits in total) for the offset.
 * | offset |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
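
/*
 * Illustrative round trip (hypothetical values): encode swap type 3
 * and offset 0x1234, then decode both from the resulting entry.
 */
static inline int example_swap_encoding_ok(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	return __swp_type(entry) == 3 && __swp_offset(entry) == 0x1234;
}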
1547
Heiko Carstensf4815ac2012-05-23 16:24:51 +02001548#ifndef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549# define PTE_FILE_MAX_BITS 26
Heiko Carstensf4815ac2012-05-23 16:24:51 +02001550#else /* CONFIG_64BIT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551# define PTE_FILE_MAX_BITS 59
Heiko Carstensf4815ac2012-05-23 16:24:51 +02001552#endif /* CONFIG_64BIT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553
1554#define pte_to_pgoff(__pte) \
1555 ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
1556
1557#define pgoff_to_pte(__off) \
1558 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
Gerald Schaefer9282ed92006-09-20 15:59:37 +02001559 | _PAGE_TYPE_FILE })
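
/*
 * Illustrative round trip (hypothetical offset): a nonlinear file pte
 * must decode back to the page offset it was built from.
 */
static inline int example_file_pte_encoding_ok(void)
{
	pte_t pte = pgoff_to_pte(0x2345UL);

	return pte_to_pgoff(pte) == 0x2345UL;
}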

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */