/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup.
 * For s390 64 bit we use up to four of the five levels the hardware
 * provides (region first tables are not used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
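
/*
 * Worked example (illustrative, not from the original header): the
 * mask picks one of several pre-colored zero pages, so reads through
 * differently colored virtual addresses hit different physical pages:
 *
 *	color = ((unsigned long)(vaddr)) & zero_page_mask;
 *	page  = virt_to_page((void *)(empty_zero_page + color));
 */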

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
#define PGDIR_SHIFT	42

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#define PTRS_PER_PGD	2048

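/*
 * Worked example (illustrative, not from the original header): with
 * the shifts above, a virtual address is split into table indexes as
 *
 *	pgd index = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)	bits 42-52
 *	pud index = (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1)	bits 31-41
 *	pmd index = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)	bits 20-30
 *	pte index = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)	bits 12-19
 */
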
#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.10.xx0010.1
 * prot-none, dirty, young	.10.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
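
/*
 * Worked example (illustrative, not from the original comment): a
 * read-write, dirty, young pte carries _PAGE_PRESENT | _PAGE_YOUNG |
 * _PAGE_DIRTY | _PAGE_READ | _PAGE_WRITE = 0x001 | 0x004 | 0x008 |
 * 0x010 | 0x020 = 0x03d in the software bits, with both hardware bits
 * (I and R) clear, matching the row ".00.xx1111.1" above.
 */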

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_READ	0x0002	/* SW segment read bit */
#define _SEGMENT_ENTRY_WRITE	0x0001	/* SW segment write bit */

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE
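
/*
 * Example (illustrative, not from the original header): a private
 * PROT_READ|PROT_WRITE mapping maps to __P011 and therefore starts
 * out as PAGE_READ; the write bit is only set after the copy-on-write
 * fault. A shared mapping with the same protection uses __S011 =
 * PAGE_WRITE directly.
 */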

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_use_skey
static inline int mm_use_skey(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.use_skey)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

static inline int pmd_bad(pmd_t pmd)
{
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	int dirty = 1;
	if (pmd_large(pmd))
		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
	return dirty;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 1;
	if (pmd_large(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

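/*
 * Typical usage pattern (sketch, mirroring the users further down in
 * this file): every pgste update is bracketed by the PCL bit, which
 * the csg loop above takes as a spinlock on the page status table
 * entry:
 *
 *	pgste = pgste_get_lock(ptep);
 *	... inspect/modify pgste and *ptep ...
 *	pgste_set_unlock(ptep, pgste);
 */
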
static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste,
				       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
				 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
	if ((pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE) &&
	    !(pte_val(entry) & _PAGE_INVALID)) {
		if (!MACHINE_HAS_ESOP) {
			/*
			 * Without enhanced suppression-on-protection force
			 * the dirty bit on for all writable ptes.
			 */
			pte_val(entry) |= _PAGE_DIRTY;
			pte_val(entry) &= ~_PAGE_PROTECT;
		}
		if (!(pte_val(entry) & _PAGE_PROTECT))
			/* This pte allows write access, set user-dirty */
			pgste_val(pgste) |= PGSTE_UC_BIT;
	}
	*ptep = entry;
	return pgste;
}

/**
 * struct gmap - guest address space
 * @crst_list: list of all crst tables used in the guest address space
 * @mm: pointer to the parent mm_struct
 * @guest_to_host: radix tree with guest to host address translation
 * @host_to_guest: radix tree with pointer to segment table entries
 * @guest_table_lock: spinlock to protect all entries in the guest page table
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct list_head crst_list;
	struct mm_struct *mm;
	struct radix_tree_root guest_to_host;
	struct radix_tree_root host_to_guest;
	spinlock_t guest_table_lock;
	unsigned long *table;
	unsigned long asce;
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
};

struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(struct gmap *, unsigned long gaddr);
unsigned long gmap_translate(struct gmap *, unsigned long gaddr);
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr);
int gmap_fault(struct gmap *, unsigned long gaddr, unsigned int fault_flags);
void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);

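/*
 * Illustrative usage sketch (not from the original header): create a
 * 2GB guest address space, back guest address 0 with the host mapping
 * at "from" (a hypothetical, segment-aligned host address), and tear
 * everything down again. Error handling is omitted.
 *
 *	struct gmap *gmap = gmap_alloc(current->mm, (1UL << 31) - 1);
 *	gmap_enable(gmap);
 *	gmap_map_segment(gmap, from, 0x0, 0x100000);
 *	...
 *	gmap_unmap_segment(gmap, 0x0, 0x100000);
 *	gmap_free(gmap);
 */
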
747static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +0200748 unsigned long addr,
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200749 pte_t *ptep, pgste_t pgste)
750{
751#ifdef CONFIG_PGSTE
Martin Schwidefsky0d0dafc2013-05-17 14:41:33 +0200752 if (pgste_val(pgste) & PGSTE_IN_BIT) {
753 pgste_val(pgste) &= ~PGSTE_IN_BIT;
Martin Schwidefsky9da4e382014-04-30 14:46:26 +0200754 gmap_do_ipte_notify(mm, addr, ptep);
Martin Schwidefskyd3383632013-04-17 10:53:39 +0200755 }
756#endif
757 return pgste;
758}
759
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry, mm);
		pgste = pgste_set_pte(ptep, pgste, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

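/*
 * Sketch (illustrative, not from the original header): an mprotect()
 * style protection change combines pte_modify with the modify_prot
 * transaction defined further down in this file:
 *
 *	pte = ptep_modify_prot_start(mm, addr, ptep);
 *	pte = pte_modify(pte, PAGE_READ);
 *	ptep_modify_prot_commit(mm, addr, ptep, pte);
 */
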
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

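/*
 * Worked example (illustrative): s390 has no hardware referenced bit
 * in the pte, so young/old is emulated with _PAGE_INVALID. pte_mkold
 * on a read-only, clean, young pte (.01.xx0101.1 in the table above)
 * clears _PAGE_YOUNG and sets _PAGE_INVALID, yielding the read-only,
 * clean, old encoding .11.xx0100.1; the next access faults and
 * pte_mkyoung makes the pte valid again.
 */
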
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_range(unsigned long address, int nr, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + global TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%2,%0,%1,0"
			: "+a" (address), "+a" (nr) : "a" (pto) : "memory");
	} while (nr != 255);
}

static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}
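
/*
 * Note (illustrative, not from the original source): attach_count is
 * used as a split counter here; the lower 16 bits count cpus that
 * have the mm attached, the upper 16 bits count concurrent flushers
 * (hence the atomic_add_return of 0x10000 above). The
 * "(count & 0xffff) <= active" test is true when no other cpu has
 * the mm attached, so the pte can simply be marked invalid and the
 * hardware flush deferred until the mm is attached again.
 */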

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int dirty;

	if (!mm_has_pgste(mm))
		return 0;
	pgste = pgste_get_lock(ptep);
	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
	pgste_val(pgste) &= ~PGSTE_UC_BIT;
	pte = *ptep;
	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
		pgste = pgste_ipte_notify(mm, addr, ptep, pgste);
		__ptep_ipte(addr, ptep);
		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
			pte_val(pte) |= _PAGE_PROTECT;
		else
			pte_val(pte) |= _PAGE_INVALID;
		*ptep = pte;
	}
	pgste_set_unlock(ptep, pgste);
	return dirty;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte, oldpte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
	}

	oldpte = pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm);
		pgste = pgste_set_pte(ptep, pgste, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste, mm);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

1081#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1082static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
1083 unsigned long address,
1084 pte_t *ptep)
1085{
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001086 pgste_t pgste;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001087 pte_t pte;
1088
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001089 if (mm_has_pgste(mm)) {
1090 pgste = pgste_get_lock(ptep);
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +02001091 pgste_ipte_notify(mm, address, ptep, pgste);
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001092 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001093
1094 pte = *ptep;
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001095 ptep_flush_lazy(mm, address, ptep);
Christian Borntraegerb56433c2013-05-27 16:19:55 +02001096
Christian Borntraeger3a826032013-06-05 09:22:33 +02001097 if (mm_has_pgste(mm)) {
Dominik Dingel65eef3352014-01-14 15:02:11 +01001098 pgste = pgste_update_all(&pte, pgste, mm);
Christian Borntraeger3a826032013-06-05 09:22:33 +02001099 pgste_set(ptep, pgste);
1100 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001101 return pte;
1102}
1103
1104static inline void ptep_modify_prot_commit(struct mm_struct *mm,
1105 unsigned long address,
1106 pte_t *ptep, pte_t pte)
1107{
Christian Borntraegerb56433c2013-05-27 16:19:55 +02001108 pgste_t pgste;
1109
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001110 if (mm_has_pgste(mm)) {
Martin Schwidefskyd56c8932013-07-19 11:15:54 +02001111 pgste = pgste_get(ptep);
Dominik Dingel65eef3352014-01-14 15:02:11 +01001112 pgste_set_key(ptep, pgste, pte, mm);
Martin Schwidefsky0a61b222013-10-18 12:03:41 +02001113 pgste = pgste_set_pte(ptep, pgste, pte);
Christian Borntraegerb56433c2013-05-27 16:19:55 +02001114 pgste_set_unlock(ptep, pgste);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001115 } else
1116 *ptep = pte;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001117}
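
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Sketch of the modify_prot transaction as generic code would use it:
 * _start invalidates the pte and returns the old value, the caller
 * modifies it, _commit installs the result. "mm", "addr", "ptep" and
 * "newprot" are assumed to be provided by the caller; the function
 * name is made up.
 */
static void modify_prot_example(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_modify_prot_start(mm, addr, ptep);
	pte = pte_modify(pte, newprot);
	ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif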
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001118
1119#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
Martin Schwidefskyf0e47c22007-07-17 04:03:03 -07001120static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
1121 unsigned long address, pte_t *ptep)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122{
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001123 pgste_t pgste;
1124 pte_t pte;
1125
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001126 if (mm_has_pgste(vma->vm_mm)) {
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001127 pgste = pgste_get_lock(ptep);
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +02001128 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001129 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001130
1131 pte = *ptep;
Martin Schwidefsky53e857f2012-09-10 13:00:09 +02001132 ptep_flush_direct(vma->vm_mm, address, ptep);
Martin Schwidefskye5098612013-07-23 20:57:57 +02001133 pte_val(*ptep) = _PAGE_INVALID;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001134
1135 if (mm_has_pgste(vma->vm_mm)) {
Konstantin Weitzb31288f2013-04-17 17:36:29 +02001136 if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
1137 _PGSTE_GPS_USAGE_UNUSED)
1138 pte_val(pte) |= _PAGE_UNUSED;
Dominik Dingel65eef3352014-01-14 15:02:11 +01001139 pgste = pgste_update_all(&pte, pgste, vma->vm_mm);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001140 pgste_set_unlock(ptep, pgste);
1141 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 return pte;
1143}
1144
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001145/*
1146 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 1147 * ptes. An optimization is possible here: tlb_gather_mmu flushes all
 1148 * TLBs of an mm if it can guarantee that the ptes of the mm_struct
 1149 * cannot be accessed while the batched unmap is running. In that case
 1150 * full==1 and a simple pte_clear is enough. See tlb.h.
1151 */
1152#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1153static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001154 unsigned long address,
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001155 pte_t *ptep, int full)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156{
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001157 pgste_t pgste;
1158 pte_t pte;
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001159
Martin Schwidefskya055f662013-07-19 10:31:55 +02001160 if (!full && mm_has_pgste(mm)) {
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001161 pgste = pgste_get_lock(ptep);
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +02001162 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001163 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001164
1165 pte = *ptep;
1166 if (!full)
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001167 ptep_flush_lazy(mm, address, ptep);
Martin Schwidefskye5098612013-07-23 20:57:57 +02001168 pte_val(*ptep) = _PAGE_INVALID;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001169
Martin Schwidefskya055f662013-07-19 10:31:55 +02001170 if (!full && mm_has_pgste(mm)) {
Dominik Dingel65eef3352014-01-14 15:02:11 +01001171 pgste = pgste_update_all(&pte, pgste, mm);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001172 pgste_set_unlock(ptep, pgste);
1173 }
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001174 return pte;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175}
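
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Sketch of the batched unmap case from the comment above: with
 * full == 1 the caller (see tlb.h) has already guaranteed a full mm
 * flush, so the call reduces to clearing the pte. The helper name is
 * made up for the example.
 */
static void zap_one_pte(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = ptep_get_and_clear_full(mm, addr, ptep, 1);

	/* ... transfer dirty/referenced state from "pte", free the page ... */
	(void) pte;
}
#endif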
1176
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001177#define __HAVE_ARCH_PTEP_SET_WRPROTECT
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001178static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
1179 unsigned long address, pte_t *ptep)
1180{
1181 pgste_t pgste;
1182 pte_t pte = *ptep;
1183
1184 if (pte_write(pte)) {
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001185 if (mm_has_pgste(mm)) {
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001186 pgste = pgste_get_lock(ptep);
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +02001187 pgste = pgste_ipte_notify(mm, address, ptep, pgste);
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001188 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001189
Martin Schwidefsky5c474a12013-08-16 13:31:40 +02001190 ptep_flush_lazy(mm, address, ptep);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001191 pte = pte_wrprotect(pte);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001192
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001193 if (mm_has_pgste(mm)) {
Martin Schwidefsky0a61b222013-10-18 12:03:41 +02001194 pgste = pgste_set_pte(ptep, pgste, pte);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001195 pgste_set_unlock(ptep, pgste);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001196 } else
1197 *ptep = pte;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001198 }
1199 return pte;
1200}
Martin Schwidefskyba8a9222007-10-22 12:52:44 +02001201
1202#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001203static inline int ptep_set_access_flags(struct vm_area_struct *vma,
1204 unsigned long address, pte_t *ptep,
1205 pte_t entry, int dirty)
1206{
1207 pgste_t pgste;
1208
1209 if (pte_same(*ptep, entry))
1210 return 0;
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001211 if (mm_has_pgste(vma->vm_mm)) {
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001212 pgste = pgste_get_lock(ptep);
Martin Schwidefsky55dbbdd2014-04-30 14:44:44 +02001213 pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
Martin Schwidefskyd3383632013-04-17 10:53:39 +02001214 }
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001215
Martin Schwidefsky53e857f2012-09-10 13:00:09 +02001216 ptep_flush_direct(vma->vm_mm, address, ptep);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001217
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001218 if (mm_has_pgste(vma->vm_mm)) {
Christian Borntraeger19514972014-08-28 23:44:57 +02001219 pgste_set_key(ptep, pgste, entry, vma->vm_mm);
Martin Schwidefsky0a61b222013-10-18 12:03:41 +02001220 pgste = pgste_set_pte(ptep, pgste, entry);
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001221 pgste_set_unlock(ptep, pgste);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001222 } else
1223 *ptep = entry;
Martin Schwidefskyb2fa47e2011-05-23 10:24:40 +02001224 return 1;
1225}
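
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Sketch of the minor-fault fixup that ptep_set_access_flags() is
 * meant for (cf. mm/memory.c): build the wanted pte, let the
 * architecture install it, and call update_mmu_cache() (a nop on
 * s390) only if something changed. The function name is made up.
 */
static void fault_fixup_example(struct vm_area_struct *vma, unsigned long addr,
				pte_t *ptep, int write_fault)
{
	pte_t entry = pte_mkyoung(*ptep);

	if (write_fault)
		entry = pte_mkdirty(pte_mkwrite(entry));
	if (ptep_set_access_flags(vma, addr, ptep, entry, write_fault))
		update_mmu_cache(vma, addr, ptep);
}
#endif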
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226
1227/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001228 * Conversion functions: convert a page and protection to a page entry,
1229 * and a page entry and page directory to the page they refer to.
1230 */
1231static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
1232{
1233 pte_t __pte;
1234 pte_val(__pte) = physpage + pgprot_val(pgprot);
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001235 return pte_mkyoung(__pte);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236}
1237
Heiko Carstens2dcea572006-09-29 01:58:41 -07001238static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1239{
Heiko Carstens0b2b6e12006-10-04 20:02:23 +02001240 unsigned long physpage = page_to_phys(page);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001241 pte_t __pte = mk_pte_phys(physpage, pgprot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242
Martin Schwidefskye5098612013-07-23 20:57:57 +02001243 if (pte_write(__pte) && PageDirty(page))
1244 __pte = pte_mkdirty(__pte);
Martin Schwidefskyabf09be2012-11-07 13:17:37 +01001245 return __pte;
Heiko Carstens2dcea572006-09-29 01:58:41 -07001246}
1247
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001249#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
1250#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
1251#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001253#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1255
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001256#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1257#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
Martin Schwidefsky5a216a22008-02-09 18:24:36 +01001258#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001259
Martin Schwidefsky5a216a22008-02-09 18:24:36 +01001260static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
1261{
Martin Schwidefsky6252d702008-02-09 18:24:37 +01001262 pud_t *pud = (pud_t *) pgd;
1263 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
1264 pud = (pud_t *) pgd_deref(*pgd);
Martin Schwidefsky5a216a22008-02-09 18:24:36 +01001265 return pud + pud_index(address);
1266}
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001267
1268static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1269{
Martin Schwidefsky6252d702008-02-09 18:24:37 +01001270 pmd_t *pmd = (pmd_t *) pud;
1271 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
1272 pmd = (pmd_t *) pud_deref(*pud);
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001273 return pmd + pmd_index(address);
1274}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001276#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1277#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1278#define pte_page(x) pfn_to_page(pte_pfn(x))
1279
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001280#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
Martin Schwidefsky190a1d72007-10-22 12:52:48 +02001281
 1282/* Find an entry in the lowest level page table. */
1283#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
1284#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286#define pte_unmap(pte) do { } while (0)
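
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Sketch of a software walk with the accessors above. Folded levels
 * are handled inside pud_offset()/pmd_offset(): if the entry type is
 * shallower than the address space, they simply pass the table
 * pointer through. Locking (e.g. pte_offset_map_lock()) is omitted;
 * the helper name is made up.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;	/* no pte table below a large pmd */
	return pte_offset_map(pmd, addr);
}
#endif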
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
Gerald Schaefer106c9922013-04-29 15:07:23 -07001288#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001289static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1290{
Gerald Schaeferd8e7a332012-10-25 17:42:50 +02001291 /*
Martin Schwidefskye5098612013-07-23 20:57:57 +02001292 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
Gerald Schaeferd8e7a332012-10-25 17:42:50 +02001293 * Convert to segment table entry format.
1294 */
1295 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1296 return pgprot_val(SEGMENT_NONE);
Martin Schwidefskye5098612013-07-23 20:57:57 +02001297 if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
1298 return pgprot_val(SEGMENT_READ);
1299 return pgprot_val(SEGMENT_WRITE);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001300}
1301
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001302static inline pmd_t pmd_wrprotect(pmd_t pmd)
1303{
1304 pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
1305 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1306 return pmd;
1307}
1308
1309static inline pmd_t pmd_mkwrite(pmd_t pmd)
1310{
1311 pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
1312 if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1313 return pmd;
1314 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1315 return pmd;
1316}
1317
1318static inline pmd_t pmd_mkclean(pmd_t pmd)
1319{
1320 if (pmd_large(pmd)) {
1321 pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
1322 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1323 }
1324 return pmd;
1325}
1326
1327static inline pmd_t pmd_mkdirty(pmd_t pmd)
1328{
1329 if (pmd_large(pmd)) {
1330 pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
1331 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1332 pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
1333 }
1334 return pmd;
1335}
1336
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001337static inline pmd_t pmd_mkyoung(pmd_t pmd)
1338{
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001339 if (pmd_large(pmd)) {
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001340 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001341 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1342 pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001343 }
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001344 return pmd;
1345}
1346
1347static inline pmd_t pmd_mkold(pmd_t pmd)
1348{
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001349 if (pmd_large(pmd)) {
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001350 pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
1351 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1352 }
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001353 return pmd;
1354}
1355
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001356static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1357{
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001358 if (pmd_large(pmd)) {
1359 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
1360 _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
1361 _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
1362 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1363 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1364 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
1365 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1366 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
1367 return pmd;
1368 }
1369 pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001370 pmd_val(pmd) |= massage_pgprot_pmd(newprot);
1371 return pmd;
1372}
1373
Gerald Schaefer106c9922013-04-29 15:07:23 -07001374static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001375{
Gerald Schaefer106c9922013-04-29 15:07:23 -07001376 pmd_t __pmd;
1377 pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001378 return __pmd;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001379}
1380
Gerald Schaefer106c9922013-04-29 15:07:23 -07001381#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1382
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001383static inline void __pmdp_csp(pmd_t *pmdp)
1384{
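	/*
	 * COMPARE AND SWAP AND PURGE: atomically replace the pmd with
	 * its invalid form and purge the TLB entries formed from it on
	 * all CPUs.
	 */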
1385 register unsigned long reg2 asm("2") = pmd_val(*pmdp);
1386 register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
1387 _SEGMENT_ENTRY_INVALID;
1388 register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
1389
1390 asm volatile(
1391 " csp %1,%3"
1392 : "=m" (*pmdp)
1393 : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
1394}
1395
1396static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
1397{
1398 unsigned long sto;
1399
1400 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
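	/*
	 * IDTE (opcode 0xb98e): invalidate the segment table entry for
	 * "address" relative to the segment table origin "sto" and clear
	 * the TLB entries formed from it; the _local variant below sets
	 * the m4 field to 1 to restrict the clearing to the local CPU.
	 */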
1401 asm volatile(
1402 " .insn rrf,0xb98e0000,%2,%3,0,0"
1403 : "=m" (*pmdp)
1404 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1405 : "cc" );
1406}
1407
1408static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
1409{
1410 unsigned long sto;
1411
1412 sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
1413 asm volatile(
1414 " .insn rrf,0xb98e0000,%2,%3,0,1"
1415 : "=m" (*pmdp)
1416 : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
1417 : "cc" );
1418}
1419
1420static inline void pmdp_flush_direct(struct mm_struct *mm,
1421 unsigned long address, pmd_t *pmdp)
1422{
1423 int active, count;
1424
1425 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1426 return;
1427 if (!MACHINE_HAS_IDTE) {
1428 __pmdp_csp(pmdp);
1429 return;
1430 }
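	/*
	 * The lower 16 bits of attach_count hold the number of CPUs the
	 * mm is attached to; the 0x10000 added here marks a flush in
	 * progress. If no other CPU has the mm attached, the cheaper
	 * local IDTE variant is sufficient.
	 */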
1431 active = (mm == current->active_mm) ? 1 : 0;
1432 count = atomic_add_return(0x10000, &mm->context.attach_count);
1433 if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
1434 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1435 __pmdp_idte_local(address, pmdp);
1436 else
1437 __pmdp_idte(address, pmdp);
1438 atomic_sub(0x10000, &mm->context.attach_count);
1439}
1440
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001441static inline void pmdp_flush_lazy(struct mm_struct *mm,
1442 unsigned long address, pmd_t *pmdp)
1443{
Martin Schwidefsky53e857f2012-09-10 13:00:09 +02001444 int active, count;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001445
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001446 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1447 return;
Martin Schwidefsky53e857f2012-09-10 13:00:09 +02001448 active = (mm == current->active_mm) ? 1 : 0;
1449 count = atomic_add_return(0x10000, &mm->context.attach_count);
1450 if ((count & 0xffff) <= active) {
1451 pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001452 mm->context.flush_mm = 1;
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001453 } else if (MACHINE_HAS_IDTE)
1454 __pmdp_idte(address, pmdp);
1455 else
1456 __pmdp_csp(pmdp);
Martin Schwidefsky53e857f2012-09-10 13:00:09 +02001457 atomic_sub(0x10000, &mm->context.attach_count);
Martin Schwidefsky3eabaee2013-07-26 15:04:02 +02001458}
1459
Gerald Schaefer106c9922013-04-29 15:07:23 -07001460#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1461
1462#define __HAVE_ARCH_PGTABLE_DEPOSIT
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001463extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1464 pgtable_t pgtable);
Gerald Schaefer106c9922013-04-29 15:07:23 -07001465
1466#define __HAVE_ARCH_PGTABLE_WITHDRAW
Aneesh Kumar K.V6b0b50b2013-06-05 17:14:02 -07001467extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
Gerald Schaefer106c9922013-04-29 15:07:23 -07001468
1469static inline int pmd_trans_splitting(pmd_t pmd)
1470{
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001471 return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
1472 (pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
Gerald Schaefer106c9922013-04-29 15:07:23 -07001473}
1474
1475static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1476 pmd_t *pmdp, pmd_t entry)
1477{
Gerald Schaefer106c9922013-04-29 15:07:23 -07001478 *pmdp = entry;
1479}
1480
1481static inline pmd_t pmd_mkhuge(pmd_t pmd)
1482{
1483 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
Martin Schwidefsky152125b2014-07-24 11:03:41 +02001484 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
1485 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001486 return pmd;
1487}
1488
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001489#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1490static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1491 unsigned long address, pmd_t *pmdp)
1492{
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001493 pmd_t pmd;
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001494
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001495 pmd = *pmdp;
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001496 pmdp_flush_direct(vma->vm_mm, address, pmdp);
Martin Schwidefsky0944fe32013-07-23 22:11:42 +02001497 *pmdp = pmd_mkold(pmd);
1498 return pmd_young(pmd);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001499}
1500
1501#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
1502static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
1503 unsigned long address, pmd_t *pmdp)
1504{
1505 pmd_t pmd = *pmdp;
1506
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001507 pmdp_flush_direct(mm, address, pmdp);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001508 pmd_clear(pmdp);
1509 return pmd;
1510}
1511
Martin Schwidefskyfcbe08d62014-10-24 10:52:29 +02001512#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
1513static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
1514 unsigned long address,
1515 pmd_t *pmdp, int full)
1516{
1517 pmd_t pmd = *pmdp;
1518
1519 if (!full)
1520 pmdp_flush_lazy(mm, address, pmdp);
1521 pmd_clear(pmdp);
1522 return pmd;
1523}
1524
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001525#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
1526static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
1527 unsigned long address, pmd_t *pmdp)
1528{
1529 return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
1530}
1531
1532#define __HAVE_ARCH_PMDP_INVALIDATE
1533static inline void pmdp_invalidate(struct vm_area_struct *vma,
1534 unsigned long address, pmd_t *pmdp)
1535{
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001536 pmdp_flush_direct(vma->vm_mm, address, pmdp);
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001537}
1538
Gerald Schaeferbe328652013-01-21 16:48:07 +01001539#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1540static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1541 unsigned long address, pmd_t *pmdp)
1542{
1543 pmd_t pmd = *pmdp;
1544
1545 if (pmd_write(pmd)) {
Martin Schwidefsky1b948d62014-04-03 13:55:01 +02001546 pmdp_flush_direct(mm, address, pmdp);
Gerald Schaeferbe328652013-01-21 16:48:07 +01001547 set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
1548 }
1549}
1550
Gerald Schaefer1ae1c1d2012-10-08 16:30:24 -07001551#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
1552#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
1553
1554static inline int pmd_trans_huge(pmd_t pmd)
1555{
1556 return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
1557}
1558
1559static inline int has_transparent_hugepage(void)
1560{
1561 return MACHINE_HAS_HPAGE ? 1 : 0;
1562}
Gerald Schaefer75077af2012-10-08 16:30:15 -07001563#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 * 64 bit swap entry format:
1567 * A page-table entry has some bits we have to treat in a special way.
 1568 * Bits 52 and 55 have to be zero, otherwise a specification
 1569 * exception will occur instead of a page translation exception. The
 1570 * specification exception has the bad habit of not storing the
 1571 * necessary information in the lowcore.
Martin Schwidefskya1c843b2015-04-22 13:55:59 +02001572 * Bits 54 and 63 are used to indicate the page type.
 1573 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200.
 1574 * This leaves bits 0-51 and bits 56-62 to store type and offset.
 1575 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 1576 * for the offset.
1577 * | offset |01100|type |00|
1578 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
1579 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 */
Heiko Carstens5a798592015-02-12 13:08:27 +01001581
Martin Schwidefskya1c843b2015-04-22 13:55:59 +02001582#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
1583#define __SWP_OFFSET_SHIFT 12
1584#define __SWP_TYPE_MASK ((1UL << 5) - 1)
1585#define __SWP_TYPE_SHIFT 2
Heiko Carstens5a798592015-02-12 13:08:27 +01001586
Adrian Bunk4448aaf2005-11-08 21:34:42 -08001587static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
1589 pte_t pte;
Martin Schwidefskya1c843b2015-04-22 13:55:59 +02001590
1591 pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
1592 pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
1593 pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 return pte;
1595}
1596
Martin Schwidefskya1c843b2015-04-22 13:55:59 +02001597static inline unsigned long __swp_type(swp_entry_t entry)
1598{
1599 return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
1600}
1601
1602static inline unsigned long __swp_offset(swp_entry_t entry)
1603{
1604 return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
1605}
1606
1607static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
1608{
1609 return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
1610}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
1612#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1613#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
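
#if 0	/* Illustrative sketch only, not compiled. */
/*
 * Sketch of the round trip implied by the format comment above: type
 * and offset survive the conversion to a pte and back, and the
 * resulting pte is invalid + protected, so any access raises a page
 * translation exception rather than a specification exception. The
 * values and the function name are arbitrary.
 */
static void swap_pte_selftest(void)
{
	swp_entry_t entry = __swp_entry(3, 0x12345);
	pte_t pte = __swp_entry_to_pte(entry);

	BUG_ON(__swp_type(__pte_to_swp_entry(pte)) != 3);
	BUG_ON(__swp_offset(__pte_to_swp_entry(pte)) != 0x12345);
}
#endif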
1614
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615#endif /* !__ASSEMBLY__ */
1616
1617#define kern_addr_valid(addr) (1)
1618
Heiko Carstens17f34582008-04-30 13:38:47 +02001619extern int vmem_add_mapping(unsigned long start, unsigned long size);
1620extern int vmem_remove_mapping(unsigned long start, unsigned long size);
Carsten Otte402b0862008-03-25 18:47:10 +01001621extern int s390_enable_sie(void);
Dominik Dingel3ac8e382014-10-23 12:09:17 +02001622extern int s390_enable_skey(void);
Dominik Dingela13cff32014-10-23 12:07:14 +02001623extern void s390_reset_cmma(struct mm_struct *mm);
Heiko Carstensf4eb07c2006-12-08 15:56:07 +01001624
Martin Schwidefsky1f6b83e2015-01-14 17:51:17 +01001625/* s390 has a private copy of get_unmapped_area() to deal with cache synonyms */
1626#define HAVE_ARCH_UNMAPPED_AREA
1627#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1628
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629/*
1630 * No page table caches to initialise
1631 */
Heiko Carstens765a0ca2013-03-23 10:29:01 +01001632static inline void pgtable_cache_init(void) { }
1633static inline void check_pgt_cache(void) { }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635#include <asm-generic/pgtable.h>
1636
 1637#endif /* _ASM_S390_PGTABLE_H */