/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

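/*
 * Huge ptes are stored in segment table (pmd) entries, which use a
 * different bit layout than page table entries. The two helpers below
 * translate between the encodings; the conversion tables list every
 * supported combination. For example, per the last row of the table in
 * __pte_to_pmd(), a read-write, dirty, young pte maps to a segment
 * entry with the software read/write and dirty/young bits set and the
 * hardware invalid and protect bits clear.
 */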
static inline pmd_t __pte_to_pmd(pte_t pte)
{
	pmd_t pmd;

	/*
	 * Convert encoding		  pte bits	   pmd bits
	 *				.IR...wrdytp	dy..R...I...wr
	 * empty			.10...000000 -> 00..0...1...00
	 * prot-none, clean, old	.11...000001 -> 00..1...1...00
	 * prot-none, clean, young	.11...000101 -> 01..1...1...00
	 * prot-none, dirty, old	.10...001001 -> 10..1...1...00
	 * prot-none, dirty, young	.10...001101 -> 11..1...1...00
	 * read-only, clean, old	.11...010001 -> 00..1...1...01
	 * read-only, clean, young	.01...010101 -> 01..1...0...01
	 * read-only, dirty, old	.11...011001 -> 10..1...1...01
	 * read-only, dirty, young	.01...011101 -> 11..1...0...01
	 * read-write, clean, old	.11...110001 -> 00..0...1...11
	 * read-write, clean, young	.01...110101 -> 01..0...0...11
	 * read-write, dirty, old	.10...111001 -> 10..0...1...11
	 * read-write, dirty, young	.00...111101 -> 11..0...0...11
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}

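/*
 * Inverse of __pte_to_pmd(): reconstruct the pte encoding from a
 * segment table entry, following the table below.
 */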
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding		   pmd bits	    pte bits
	 *				dy..R...I...wr	  .IR...wrdytp
	 * empty			00..0...1...00 -> .10...001100
	 * prot-none, clean, old	00..0...1...00 -> .10...000001
	 * prot-none, clean, young	01..0...1...00 -> .10...000101
	 * prot-none, dirty, old	10..0...1...00 -> .10...001001
	 * prot-none, dirty, young	11..0...1...00 -> .10...001101
	 * read-only, clean, old	00..1...1...01 -> .11...010001
	 * read-only, clean, young	01..1...1...01 -> .11...010101
	 * read-only, dirty, old	10..1...1...01 -> .11...011001
	 * read-only, dirty, young	11..1...1...01 -> .11...011101
	 * read-write, clean, old	00..0...1...11 -> .10...110001
	 * read-write, clean, young	01..0...1...11 -> .10...110101
	 * read-write, dirty, old	10..0...1...11 -> .10...111001
	 * read-write, dirty, young	11..0...1...11 -> .10...111101
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
		/* Set the sw dirty/young pte bits, mirroring __pte_to_pmd() */
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

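/*
 * Install a huge pte: convert it to segment entry format first. Without
 * MACHINE_HAS_HPAGE (no hardware large-page support) huge pages are
 * emulated, and the segment entry is pointed at the shadow page table
 * that arch_prepare_hugepage() stored in page[1].index; with hardware
 * support only the large-page bit needs to be set.
 */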
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd;

	pmd = __pte_to_pmd(pte);
	if (!MACHINE_HAS_HPAGE) {
		/* Emulated huge ptes lose the dirty and young bits */
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= pte_page(pte)[1].index;
	} else
		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	*(pmd_t *) ptep = pmd;
}

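/*
 * Read back a huge pte. For an emulated huge page the segment entry
 * holds the shadow page table origin, so the frame address is recovered
 * from the first shadow pte; such entries are reported young and dirty
 * because the emulation cannot track those bits per segment.
 */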
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin;
	pmd_t pmd;

	pmd = *(pmd_t *) ptep;
	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= *(unsigned long *) origin;
		/* Emulated huge ptes are young and dirty by definition */
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
	}
	return __pmd_to_pte(pmd);
}

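/*
 * Clear a huge pte and return its old contents: flush the TLB for the
 * segment entry, then mark the entry empty.
 */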
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	pmdp_flush_direct(mm, addr, pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}

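/*
 * Prepare a page for use as an emulated huge page: allocate a shadow
 * page table, map the PTRS_PER_PTE 4K frames of the page in it, and
 * remember the table in page[1].index. Nothing to do when hardware
 * large pages are available.
 */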
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

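/*
 * Release the shadow page table of an emulated huge page, if
 * arch_prepare_hugepage() allocated one.
 */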
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

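/*
 * A huge pte lives in a segment table entry, so allocating one means
 * walking to, and if necessary allocating, the pmd for addr. The sz
 * argument is unused: only one huge page size is supported here.
 */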
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

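/*
 * Look up the segment table entry serving as huge pte for addr; returns
 * NULL if the pgd or pud level is not populated.
 */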
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

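/* Huge pmds are never shared on s390, so there is nothing to unshare. */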
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

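/*
 * A pmd maps a huge page only if the hardware large-page bit is set;
 * emulated huge pages look like normal page table pointers here.
 */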
int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

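/* Huge pages at the pud level are not supported. */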
int pud_huge(pud_t pud)
{
	return 0;
}