/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

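/*
 * Editor's sketch (hedged, not part of the original header): ZERO_PAGE()
 * above selects among replicated read-only zero pages by the low bits of
 * the faulting address; zero_page_mask is 0 when only a single zero page
 * exists. The helper below only restates the macro's arithmetic for
 * illustration and is not meant to be compiled in.
 */
#if 0
static inline struct page *example_zero_page(unsigned long addr)
{
	/* same arithmetic as the ZERO_PAGE() macro above */
	return virt_to_page((void *)(empty_zero_page +
				     (addr & zero_page_mask)));
}
#endif
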
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

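/*
 * Illustrative arithmetic (editor's addition, derived from the constants
 * above): with 4KB pages a page table maps PTRS_PER_PTE * PAGE_SIZE =
 * 256 * 4KB = 1MB, which is why PMD_SHIFT is 20 on both 31 and 64 bit.
 * On 64 bit a segment (pmd) table maps 2048 * 1MB = 2GB (PUD_SHIFT = 31),
 * a region third (pud) table maps 2048 * 2GB = 4TB (PGDIR_SHIFT = 42),
 * and the topmost pgd covers 2048 * 4TB = 8PB of address space.
 */
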
#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */

extern unsigned long VMALLOC_START;

#ifndef __s390x__
#define VMALLOC_SIZE	(96UL << 20)
#define VMALLOC_END	0x7e000000UL
#define VMEM_MAP_END	0x80000000UL
#else /* __s390x__ */
#define VMALLOC_SIZE	(128UL << 30)
#define VMALLOC_END	0x3e000000000UL
#define VMEM_MAP_END	0x40000000000UL
#endif /* __s390x__ */

/*
 * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
 * mapping. This needs to be calculated at compile time since the size of the
 * VMEM_MAP is static but the size of struct page can change.
 */
#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
#define vmemmap		((struct page *) VMALLOC_END)
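
/*
 * Worked example (editor's addition, assuming a 64 bit kernel and a
 * hypothetical sizeof(struct page) of 64 bytes): the vmemmap array gets
 * VMEM_MAP_END - VMALLOC_END = 0x40000000000 - 0x3e000000000 = 128GB of
 * virtual space, i.e. room for 2G struct pages, enough to describe up to
 * 8TB of physical memory. VMEM_MAX_PFN additionally caps this at
 * VMALLOC_START, and VMEM_MAX_PHYS rounds the result down to a 16MB
 * boundary.
 */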

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                          |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                             |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all
 * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
 * afterwards. This change is done while holding the lock, but the
 * intermediate step of a previously valid pte with the hw invalid bit set
 * can be observed by handle_pte_fault. That makes it necessary that all
 * valid pte types with the hw invalid bit set must be distinguishable from
 * the four pte types empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

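/*
 * Editor's sketch (hedged, not part of the original header): the property
 * described above, spelled out for the read-write case. A valid
 * _PAGE_TYPE_RW pte has the value 0x000; once ipte sets the hardware
 * invalid bit the entry reads 0x400, which is exactly _PAGE_TYPE_EMPTY and
 * therefore still reports pte_none() == 1 and pte_present() == 0 to a
 * lockless reader in handle_pte_fault. Illustration only.
 */
#if 0
static inline void example_ipte_transition(void)
{
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_RW;		/* valid, writable pte */
	pte_val(pte) |= _PAGE_INVALID;		/* what ipte makes visible */
	BUG_ON(pte_val(pte) != _PAGE_TYPE_EMPTY);
	BUG_ON(!pte_none(pte) || pte_present(pte));
}
#endif
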
/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

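/*
 * Editor's sketch (hedged, not from this header): the mm context code is
 * expected to combine these bits with the page table origin to form a user
 * address-space-control element, roughly as below. The helper name is
 * illustrative only, and a real asce also carries the table type bits on
 * 64 bit.
 */
#if 0
static inline unsigned long example_user_asce(struct mm_struct *mm)
{
	/* table origin | user bits | table length */
	return __pa(mm->pgd) | _ASCE_USER_BITS | _ASCE_TABLE_LENGTH;
}
#endif
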
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

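/*
 * Illustrative note (editor's addition): the __P/__S tables above are what
 * the generic protection_map is built from. Because the hardware only has
 * an invalid and a read-only bit, PROT_EXEC and PROT_READ collapse to the
 * same protection, and only shared writable mappings (__Sx1x) get PAGE_RW.
 * A private writable mapping (__P011/__P111) stays read-only so that the
 * first write faults and triggers copy-on-write.
 */
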
static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

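/*
 * Editor's sketch (hedged): the inline assembly in pgste_get_lock()
 * implements, in effect, the compare-and-swap loop below on the pgste that
 * lives PTRS_PER_PTE entries after the pte, using RCP_PCL_BIT as a spin
 * lock. C equivalent for illustration only; the real code uses csg
 * directly to stay atomic against other CPUs.
 */
#if 0
static inline pgste_t example_pgste_get_lock(pte_t *ptep)
{
	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
	unsigned long old, new;

	preempt_disable();
	do {
		old = *pgste & ~RCP_PCL_BIT;	/* expect the lock bit clear */
		new = old | RCP_PCL_BIT;	/* ... and try to set it */
	} while (cmpxchg(pgste, old, new) != old);
	return __pgste(new);
}
#endif
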
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long pfn, bits;
	unsigned char skey;

	pfn = pte_val(*ptep) >> PAGE_SHIFT;
	skey = page_get_storage_key(pfn);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits) {
		skey ^= bits;
		page_set_storage_key(pfn, skey, 1);
	}
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long pfn;
	unsigned long okey, nkey;

	pfn = pte_val(*ptep) >> PAGE_SHIFT;
	okey = nkey = page_get_storage_key(pfn);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(pfn, nkey, 1);
#endif
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef __s390x__
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * No need to flush the TLB: on s390 reference bits are kept in the
	 * storage key and never in the TLB. With virtualization we handle
	 * the reference bit, without it we can simply return.
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

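/*
 * Editor's sketch (hedged): the sequence described in the comment above,
 * roughly as generic code such as change_pte_range() performs it. Names
 * are illustrative; the point is that step 1 already flushed the TLB on
 * s390, so step 3 can be a nop.
 */
#if 0
static inline void example_change_one_pte(struct mm_struct *mm,
					  unsigned long addr, pte_t *ptep,
					  pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(mm, addr, ptep);	/* 1: clear + flush */
	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);		/* 2: install new pte */
	/* 3: flush_tlb_range() afterwards is a nop on s390 */
}
#endif
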
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

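/*
 * Editor's sketch (hedged): a software walk to the pte for an address,
 * written out to show how the dynamic folding above works. With a
 * three-level (region third) address space the pgd entry already is the
 * pud, and with a two-level one it already is the pmd, so pud_offset() and
 * pmd_offset() simply pass the pointer through instead of dereferencing it.
 */
#if 0
static inline pte_t *example_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);	/* may just cast pgd */
	pmd_t *pmd = pmd_offset(pud, addr);	/* may just cast pud */

	return pte_offset_kernel(pmd, addr);
}
#endif
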
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

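/*
 * Worked example (editor's addition): encoding swap slot 0x1234 of swap
 * type 3. mk_swap_pte() places the type in bits 2-6 of the pte value and
 * splits the offset around the protection/invalid bits:
 *   pte_val = _PAGE_TYPE_SWAP | (3 << 2)
 *	       | ((0x1234 & 1) << 7) | ((0x1234 & ~1UL) << 11)
 * __swp_type() and __swp_offset() invert exactly this, so
 * __swp_offset(__swp_entry(3, 0x1234)) yields 0x1234 again.
 */
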
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */