/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
        (virt_to_page((void *)(empty_zero_page + \
         (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
        extern unsigned long zero_pfn;
        unsigned long offset_from_zero_pfn = pfn - zero_pfn;
        return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)       page_to_pfn(ZERO_PAGE(addr))
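
/*
 * For illustration (sketch): the zero pages occupy one contiguous pfn
 * block starting at zero_pfn, so is_zero_pfn() can test membership with
 * a single subtraction. Assuming a hypothetical user address vaddr:
 *
 *      struct page *zp = ZERO_PAGE(vaddr);     (color picked by the mask)
 *      int z = is_zero_pfn(page_to_pfn(zp));   (evaluates to 1)
 */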

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT      20
# define PUD_SHIFT      20
# define PGDIR_SHIFT    20
#else /* CONFIG_64BIT */
# define PMD_SHIFT      20
# define PUD_SHIFT      31
# define PGDIR_SHIFT    42
#endif /* CONFIG_64BIT */

#define PMD_SIZE        (1UL << PMD_SHIFT)
#define PMD_MASK        (~(PMD_SIZE-1))
#define PUD_SIZE        (1UL << PUD_SHIFT)
#define PUD_MASK        (~(PUD_SIZE-1))
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
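
/*
 * Worked example (sketch): with the shifts above, one pmd entry maps
 * 1UL << 20 = 1 MB (a segment). On 64 bit, one pud entry maps
 * 1UL << 31 = 2 GB and one pgd entry maps 1UL << 42 = 4 TB.
 */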

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE    256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD    1
#define PTRS_PER_PUD    1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD    2048
#define PTRS_PER_PUD    2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD    2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
        printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

/*
 * A 31 bit page table entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit page table entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO        0x100           /* HW Change-bit override */
#define _PAGE_RO        0x200           /* HW read-only bit  */
#define _PAGE_INVALID   0x400           /* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT       0x001           /* SW pte type bit t */
#define _PAGE_SWX       0x002           /* SW pte type bit x */
#define _PAGE_SWC       0x004           /* SW pte changed bit (for KVM) */
#define _PAGE_SWR       0x008           /* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL   0x010           /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY        0x400
#define _PAGE_TYPE_NONE         0x401
#define _PAGE_TYPE_SWAP         0x403
#define _PAGE_TYPE_FILE         0x601   /* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO           0x200
#define _PAGE_TYPE_RW           0x000
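
/*
 * For illustration: each pte type above is a combination of the
 * hardware and software bits defined earlier:
 *
 *      _PAGE_TYPE_EMPTY == _PAGE_INVALID
 *      _PAGE_TYPE_NONE  == _PAGE_INVALID | _PAGE_SWT
 *      _PAGE_TYPE_SWAP  == _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX
 *      _PAGE_TYPE_FILE  == _PAGE_INVALID | _PAGE_RO  | _PAGE_SWT
 *
 * The ipte transition table further down relies on this composition.
 */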

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY       0x020   /* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE        0x220
#define _HPAGE_TYPE_RO          0x200   /* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW          0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *                      irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY     1000   ->   1000
 * _PAGE_TYPE_NONE      1001   ->   1001
 * _PAGE_TYPE_SWAP      1011   ->   1011
 * _PAGE_TYPE_FILE      11?1   ->   11?1
 * _PAGE_TYPE_RO        0100   ->   1100
 * _PAGE_TYPE_RW        0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH      0x80000000UL    /* space switch event       */
#define _ASCE_ORIGIN_MASK       0x7ffff000UL    /* segment table origin     */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_TABLE_LENGTH      0x7f    /* 128 x 64 entries = 8k            */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   0x7fffffc0UL    /* page table origin        */
#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */
#define _SEGMENT_ENTRY_COMMON   0x10    /* common segment bit               */
#define _SEGMENT_ENTRY_PTL      0x0f    /* page table length                */

#define _SEGMENT_ENTRY          (_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS    0xf0000000UL
#define RCP_FP_BIT      0x08000000UL
#define RCP_PCL_BIT     0x00800000UL
#define RCP_HR_BIT      0x00400000UL
#define RCP_HC_BIT      0x00200000UL
#define RCP_GR_BIT      0x00040000UL
#define RCP_GC_BIT      0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT      0x00008000UL
#define KVM_UC_BIT      0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN            ~0xfffUL/* segment table origin             */
#define _ASCE_PRIVATE_SPACE     0x100   /* private space control            */
#define _ASCE_ALT_EVENT         0x80    /* storage alteration event control */
#define _ASCE_SPACE_SWITCH      0x40    /* space switch event               */
#define _ASCE_REAL_SPACE        0x20    /* real space control               */
#define _ASCE_TYPE_MASK         0x0c    /* asce table type mask             */
#define _ASCE_TYPE_REGION1      0x0c    /* region first table type          */
#define _ASCE_TYPE_REGION2      0x08    /* region second table type         */
#define _ASCE_TYPE_REGION3      0x04    /* region third table type          */
#define _ASCE_TYPE_SEGMENT      0x00    /* segment table type               */
#define _ASCE_TABLE_LENGTH      0x03    /* region table length              */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN    ~0xfffUL/* region/segment table origin      */
#define _REGION_ENTRY_INV       0x20    /* invalid region table entry       */
#define _REGION_ENTRY_TYPE_MASK 0x0c    /* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1   0x0c    /* region first table type          */
#define _REGION_ENTRY_TYPE_R2   0x08    /* region second table type         */
#define _REGION_ENTRY_TYPE_R3   0x04    /* region third table type          */
#define _REGION_ENTRY_LENGTH    0x03    /* region third length              */

#define _REGION1_ENTRY          (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY          (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY          (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY    (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN   ~0x7ffUL/* segment table origin             */
#define _SEGMENT_ENTRY_RO       0x200   /* page protection bit              */
#define _SEGMENT_ENTRY_INV      0x20    /* invalid segment table entry      */

#define _SEGMENT_ENTRY          (0)
#define _SEGMENT_ENTRY_EMPTY    (_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE    0x400   /* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO       0x100   /* change-recording override        */
#define _SEGMENT_ENTRY_SPLIT_BIT 0      /* THP splitting bit number         */
#define _SEGMENT_ENTRY_SPLIT    (1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK       (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
                                 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS    0xf000000000000000UL
#define RCP_FP_BIT      0x0800000000000000UL
#define RCP_PCL_BIT     0x0080000000000000UL
#define RCP_HR_BIT      0x0040000000000000UL
#define RCP_HC_BIT      0x0020000000000000UL
#define RCP_GR_BIT      0x0004000000000000UL
#define RCP_GC_BIT      0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT      0x0000800000000000UL
#define KVM_UC_BIT      0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS         (_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
                                 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE       __pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO         __pgprot(_PAGE_TYPE_RO)
#define PAGE_RW         __pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL     PAGE_RW
#define PAGE_COPY       PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_RO
#define __P010  PAGE_RO
#define __P011  PAGE_RO
#define __P100  PAGE_RO
#define __P101  PAGE_RO
#define __P110  PAGE_RO
#define __P111  PAGE_RO

#define __S000  PAGE_NONE
#define __S001  PAGE_RO
#define __S010  PAGE_RW
#define __S011  PAGE_RW
#define __S100  PAGE_RO
#define __S101  PAGE_RO
#define __S110  PAGE_RW
#define __S111  PAGE_RW
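
/*
 * For illustration: __Pxxx/__Sxxx are indexed by the mmap protection
 * bits in xwr order. A private PROT_READ|PROT_WRITE mapping selects
 * __P011 == PAGE_RO, so the first write faults and is resolved by
 * copy-on-write; the shared variant selects __S011 == PAGE_RW and
 * writes go straight through.
 */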

static inline int mm_exclusive(struct mm_struct *mm)
{
        return likely(mm == current->active_mm &&
                      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        if (unlikely(mm->context.has_pgste))
                return 1;
#endif
        return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 1;
        return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
        if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
                return 0;
        return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
        /*
         * With dynamic page table levels the pgd can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 1;
        return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
        if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
                return 0;
        return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
        /*
         * With dynamic page table levels the pud can be a region table
         * entry or a segment table entry. Check for the bits that are
         * invalid for either table entry.
         */
        unsigned long mask =
                ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
                ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
        return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
        return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
        return 0;
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
        return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
                (!(pte_val(pte) & _PAGE_INVALID) &&
                 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
        unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
        return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
        return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        preempt_disable();
        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear RCP_PCL_BIT in old */
                "       oihh    %1,0x0080\n"    /* set RCP_PCL_BIT in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear RCP_PCL_BIT */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
        preempt_enable();
#endif
}
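
/*
 * Usage sketch (for illustration): pgste_get_lock() spins on the PCL
 * bit of the page status table entry that sits PTRS_PER_PTE entries
 * after the pte in the same page table page. Every pgste manipulation
 * below follows the pattern
 *
 *      pgste = pgste_get_lock(ptep);
 *      ... inspect or modify pgste and *ptep ...
 *      pgste_set_unlock(ptep, pgste);
 *
 * with preemption disabled for the duration of the critical section.
 */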

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits;
        unsigned char skey;

        if (!pte_present(*ptep))
                return pgste;
        address = pte_val(*ptep) & PAGE_MASK;
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
        if (bits & _PAGE_CHANGED)
                page_set_storage_key(address, skey ^ bits, 1);
        else if (bits)
                page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* RCP_GR_BIT & RCP_GC_BIT */
        /* Get host changed & referenced bits from pgste */
        bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
        /* Clear host bits in pgste. */
        pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
        pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) |=
                (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        /* Transfer changed and referenced to kvm user bits */
        pgste_val(pgste) |= bits << 45;         /* KVM_UR_BIT & KVM_UC_BIT */
        /* Transfer changed & referenced to pte software bits */
        pte_val(*ptep) |= bits << 1;            /* _PAGE_SWR & _PAGE_SWC */
#endif
        return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        int young;

        if (!pte_present(*ptep))
                return pgste;
        young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
        /* Transfer page referenced bit to pte software bit (host view) */
        if (young || (pgste_val(pgste) & RCP_HR_BIT))
                pte_val(*ptep) |= _PAGE_SWR;
        /* Clear host referenced bit in pgste. */
        pgste_val(pgste) &= ~RCP_HR_BIT;
        /* Transfer page referenced bit to guest bit in pgste */
        pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
        return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long okey, nkey;

        if (!pte_present(entry))
                return;
        address = pte_val(entry) & PAGE_MASK;
        okey = nkey = page_get_storage_key(address);
        nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
        /* Set page access key and fetch protection bit from pgste */
        nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
        if (okey != nkey)
                page_set_storage_key(address, nkey, 1);
#endif
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
        struct list_head list;
        struct mm_struct *mm;
        unsigned long *table;
        unsigned long asce;
        struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
        struct list_head list;
        unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
        unsigned long vmaddr;
        struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
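
/*
 * Usage sketch (illustrative only): a KVM-like user creates a guest
 * address space, backs a 1 MB aligned range with part of the parent
 * mm, and resolves guest faults on demand:
 *
 *      struct gmap *gmap = gmap_alloc(current->mm);
 *      gmap_map_segment(gmap, from, to, length);
 *      gmap_enable(gmap);
 *      ... gmap_fault(guest_address, gmap) ...
 *      gmap_disable(gmap);
 *      gmap_free(gmap);
 */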

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste_set_pte(ptep, pgste, entry);
                *ptep = entry;
                pgste_set_unlock(ptep, pgste);
        } else
                *ptep = entry;
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
        return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
        if (pte_val(pte) & _PAGE_SWC)
                return 1;
#endif
        return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
        if (pte_val(pte) & _PAGE_SWR)
                return 1;
#endif
        return 0;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) &= _PAGE_CHG_MASK;
        pte_val(pte) |= pgprot_val(newprot);
        return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        /* Do not clobber _PAGE_TYPE_NONE pages! */
        if (!(pte_val(pte) & _PAGE_INVALID))
                pte_val(pte) |= _PAGE_RO;
        return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_RO;
        return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
        pte_val(pte) &= ~_PAGE_SWC;
#endif
        return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
        pte_val(pte) &= ~_PAGE_SWR;
#endif
        return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
        /*
         * PROT_NONE needs to be remapped from the pte type to the ste type.
         * The HW invalid bit is also different for pte and ste. The pte
         * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
         * bit, so we don't have to clear it.
         */
        if (pte_val(pte) & _PAGE_INVALID) {
                if (pte_val(pte) & _PAGE_SWT)
                        pte_val(pte) |= _HPAGE_TYPE_NONE;
                pte_val(pte) |= _SEGMENT_ENTRY_INV;
        }
        /*
         * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
         * table entry.
         */
        pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
        /*
         * Also set the change-override bit because we don't need dirty bit
         * tracking for hugetlbfs pages.
         */
        pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
        return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
                                                 pte_t *ptep)
{
        pgste_t pgste;
        int dirty = 0;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_all(ptep, pgste);
                dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
                pgste_val(pgste) &= ~KVM_UC_BIT;
                pgste_set_unlock(ptep, pgste);
                return dirty;
        }
        return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
                                                 pte_t *ptep)
{
        pgste_t pgste;
        int young = 0;

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_young(ptep, pgste);
                young = !!(pgste_val(pgste) & KVM_UR_BIT);
                pgste_val(pgste) &= ~KVM_UR_BIT;
                pgste_set_unlock(ptep, pgste);
        }
        return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_update_young(ptep, pgste);
                pte = *ptep;
                *ptep = pte_mkold(pte);
                pgste_set_unlock(ptep, pgste);
                return pte_young(pte);
        }
        return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
{
        /* No need to flush the TLB:
         * on s390 the reference bits are kept in the storage key and
         * never in the TLB. With virtualization we handle the reference
         * bit, without it we can simply return. */
        return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
                /* pto must point to the start of the segment table */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
                /* ipte in zarch mode can do the math */
                pte_t *pto = ptep;
#endif
                asm volatile(
                        "       ipte    %2,%3"
                        : "=m" (*ptep) : "m" (*ptep),
                          "a" (pto), "a" (address));
        }
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
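
/*
 * For illustration, a sketch of how common code executes the sequence
 * described above:
 *
 *      pte_t pte = ptep_get_and_clear(mm, addr, ptep); (flushes via ipte)
 *      pte = pte_modify(pte, newprot);
 *      set_pte_at(mm, addr, ptep, pte);
 *      flush_tlb_range(vma, start, end);               (nop on s390)
 */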
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        mm->context.flush_mm = 1;
        if (mm_has_pgste(mm))
                pgste = pgste_get_lock(ptep);

        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep)
{
        pte_t pte;

        mm->context.flush_mm = 1;
        if (mm_has_pgste(mm))
                pgste_get_lock(ptep);

        pte = *ptep;
        if (!mm_exclusive(mm))
                __ptep_ipte(address, ptep);
        return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
                                           unsigned long address,
                                           pte_t *ptep, pte_t pte)
{
        *ptep = pte;
        if (mm_has_pgste(mm))
                pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(vma->vm_mm))
                pgste = pgste_get_lock(ptep);

        pte = *ptep;
        __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;

        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long address,
                                            pte_t *ptep, int full)
{
        pgste_t pgste;
        pte_t pte;

        if (mm_has_pgste(mm))
                pgste = pgste_get_lock(ptep);

        pte = *ptep;
        if (!full)
                __ptep_ipte(address, ptep);
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;

        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
                                       unsigned long address, pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte = *ptep;

        if (pte_write(pte)) {
                mm->context.flush_mm = 1;
                if (mm_has_pgste(mm))
                        pgste = pgste_get_lock(ptep);

                if (!mm_exclusive(mm))
                        __ptep_ipte(address, ptep);
                *ptep = pte_wrprotect(pte);

                if (mm_has_pgste(mm))
                        pgste_set_unlock(ptep, pgste);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep,
                                        pte_t entry, int dirty)
{
        pgste_t pgste;

        if (pte_same(*ptep, entry))
                return 0;
        if (mm_has_pgste(vma->vm_mm))
                pgste = pgste_get_lock(ptep);

        __ptep_ipte(address, ptep);
        *ptep = entry;

        if (mm_has_pgste(vma->vm_mm))
                pgste_set_unlock(ptep, pgste);
        return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
        pte_t __pte;
        pte_val(__pte) = physpage + pgprot_val(pgprot);
        return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
        unsigned long physpage = page_to_phys(page);

        return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        pud_t *pud = (pud_t *) pgd;
        if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
                pud = (pud_t *) pgd_deref(*pgd);
        return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) pud;
        if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
                pmd = (pmd_t *) pud_deref(*pud);
        return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
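
/*
 * For illustration: resolving a kernel virtual address to its pte with
 * the helpers above (sketch, assuming all levels are populated):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * On 31 bit the pud and pmd levels are folded, so pud_offset and
 * pmd_offset essentially relabel the pgd pointer.
 */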

static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
        unsigned long sto = (unsigned long) pmdp -
                            pmd_index(address) * sizeof(pmd_t);

        if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
                asm volatile(
                        "       .insn   rrf,0xb98e0000,%2,%3,0,0"
                        : "=m" (*pmdp)
                        : "m" (*pmdp), "a" (sto),
                          "a" ((address & HPAGE_MASK))
                        : "cc"
                );
        }
}
1208
Gerald Schaefer75077af2012-10-08 16:30:15 -07001209#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Gerald Schaefer9501d092012-10-08 16:30:18 -07001210#define __HAVE_ARCH_PGTABLE_DEPOSIT
1211extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);
1212
1213#define __HAVE_ARCH_PGTABLE_WITHDRAW
1214extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);
1215
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t entry)
{
        *pmdp = entry;
}

/* Translate a pte protection value into its segment table equivalent. */
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
        unsigned long pgprot_pmd = 0;

        if (pgprot_val(pgprot) & _PAGE_INVALID) {
                if (pgprot_val(pgprot) & _PAGE_SWT)
                        pgprot_pmd |= _HPAGE_TYPE_NONE;
                pgprot_pmd |= _SEGMENT_ENTRY_INV;
        }
        if (pgprot_val(pgprot) & _PAGE_RO)
                pgprot_pmd |= _SEGMENT_ENTRY_RO;
        return pgprot_pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmd_val(pmd) &= _SEGMENT_CHG_MASK;
        pmd_val(pmd) |= massage_pgprot_pmd(newprot);
        return pmd;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
        return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
        return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        /* No dirty bit in the segment table entry. */
        return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        /* No referenced bit in the segment table entry. */
        return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        /* No referenced bit in the segment table entry. */
        return pmd;
}

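/*
 * Because the segment table entry has no referenced bit, the young
 * state of a huge pmd is read from the storage keys of the underlying
 * 4K frames: rrbe resets the reference bit of one frame per execution,
 * rrbm (when the machine has it) of 64 frames at a time, which is why
 * the loops below run PTRS_PER_PTE and PTRS_PER_PTE >> 6 times.
 */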
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long address, pmd_t *pmdp)
{
        unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
        long tmp, rc;
        int counter;

        rc = 0;
        if (MACHINE_HAS_RRBM) {
                counter = PTRS_PER_PTE >> 6;
                asm volatile(
                        "0:     .insn   rre,0xb9ae0000,%0,%3\n" /* rrbm */
                        "       ogr     %1,%0\n"
                        "       la      %3,0(%4,%3)\n"
                        "       brct    %2,0b\n"
                        : "=&d" (tmp), "+&d" (rc), "+d" (counter),
                          "+a" (pmd_addr)
                        : "a" (64 * 4096UL) : "cc");
                rc = !!rc;
        } else {
                counter = PTRS_PER_PTE;
                asm volatile(
                        "0:     rrbe    0,%2\n"
                        "       la      %2,0(%3,%2)\n"
                        "       brc     12,1f\n"
                        "       lhi     %0,1\n"
                        "1:     brct    %1,0b\n"
                        : "+d" (rc), "+d" (counter), "+a" (pmd_addr)
                        : "a" (4096UL) : "cc");
        }
        return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
                                       unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;

        __pmd_idte(address, pmdp);
        pmd_clear(pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pmd_t *pmdp)
{
        return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp)
{
        __pmd_idte(address, pmdp);
}

1343static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1344{
1345 pmd_t __pmd;
1346 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
1347 return __pmd;
1348}
1349
1350#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1351#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1352
1353static inline int pmd_trans_huge(pmd_t pmd)
1354{
1355 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1356}
1357
1358static inline int has_transparent_hugepage(void)
1359{
1360 return MACHINE_HAS_HPAGE ? 1 : 0;
1361}
1362
1363static inline unsigned long pmd_pfn(pmd_t pmd)
1364{
1365 if (pmd_trans_huge(pmd))
1366 return pmd_val(pmd) >> HPAGE_SHIFT;
1367 else
1368 return pmd_val(pmd) >> PAGE_SHIFT;
1369}
Gerald Schaefer75077af2012-10-08 16:30:15 -07001370#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1371
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 21 and 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 53 and 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bits 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
        pte_t pte;
        offset &= __SWP_OFFSET_MASK;
        pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
                ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
        return pte;
}
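
/*
 * Worked example (a sketch, 64 bit layout): type 5, offset 0x1000.
 * Offset bit 0 is placed at pte bit 7, the remaining offset bits are
 * shifted up by 11, so
 *
 *	pte_val(pte) = _PAGE_TYPE_SWAP | (5 << 2) | (0x1000 << 11)
 *
 * and the __swp_type()/__swp_offset() macros below simply undo these
 * shifts.
 */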

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
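
/*
 * Example (a sketch): a nonlinear file pte keeps the low 7 bits of
 * the page offset in pte bits 1-7 and the remaining bits from pte
 * bit 12 upward, leaving the hardware-defined bits in between
 * untouched. pgoff 0x100 thus encodes to
 * ((0x100 >> 7) << 12) | _PAGE_TYPE_FILE, and pte_to_pgoff()
 * recovers 0x100 from it, the type bits staying clear of the offset
 * fields.
 */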

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr) (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */