/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>

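/* Convert a huge pte into the corresponding segment table (pmd) entry. */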
static inline pmd_t __pte_to_pmd(pte_t pte)
{
	int none, prot;
	pmd_t pmd;

	/*
	 * Convert encoding		pte bits	pmd bits
	 *				.IR.....wdtp	..R...I.....
	 * empty			.10.....0000 -> ..0...1.....
	 * prot-none, clean		.11.....0001 -> ..1...1.....
	 * prot-none, dirty		.10.....0101 -> ..1...1.....
	 * read-only, clean		.01.....0001 -> ..1...0.....
	 * read-only, dirty		.01.....0101 -> ..1...0.....
	 * read-write, clean		.01.....1001 -> ..0...0.....
	 * read-write, dirty		.00.....1101 -> ..0...0.....
	 * Huge ptes are dirty by definition, a clean pte is made dirty
	 * by the conversion.
	 */
	if (pte_present(pte)) {
		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
		if (pte_val(pte) & _PAGE_INVALID)
			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
		none = (pte_val(pte) & _PAGE_PRESENT) &&
			(pte_val(pte) & _PAGE_INVALID);
		prot = (pte_val(pte) & _PAGE_PROTECT);
		if (prot || none)
			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	} else
		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
	return pmd;
}

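/* Convert a segment table (pmd) entry back into the huge pte format. */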
static inline pte_t __pmd_to_pte(pmd_t pmd)
{
	pte_t pte;

	/*
	 * Convert encoding		pmd bits	pte bits
	 *				..R...I.....	.IR.....wdtp
	 * empty			..0...1..... -> .10.....0000
	 * prot-none, young		..1...1..... -> .10.....0101
	 * read-only, young		..1...0..... -> .01.....0101
	 * read-write, young		..0...0..... -> .00.....1101
	 * Huge ptes are dirty by definition
	 */
	if (pmd_present(pmd)) {
		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
			(pmd_val(pmd) & PAGE_MASK);
		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
			pte_val(pte) |= _PAGE_INVALID;
		else {
			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
				pte_val(pte) |= _PAGE_PROTECT;
			else
				pte_val(pte) |= _PAGE_WRITE;
		}
	} else
		pte_val(pte) = _PAGE_INVALID;
	return pte;
}

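/*
 * Set a huge pte: convert the pte to segment entry format and store it in
 * the pmd slot. Without hardware large pages the segment entry is made to
 * point at the pte table prepared by arch_prepare_hugepage().
 */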
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	pmd_t pmd;

	pmd = __pte_to_pmd(pte);
	if (!MACHINE_HAS_HPAGE) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= pte_page(pte)[1].index;
	} else
		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
	*(pmd_t *) ptep = pmd;
}

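/*
 * Read a huge pte: without hardware large pages the segment entry points
 * at a pte table, so the entry origin is replaced with the first pte of
 * that table before converting the result back to pte format.
 */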
pte_t huge_ptep_get(pte_t *ptep)
{
	unsigned long origin;
	pmd_t pmd;

	pmd = *(pmd_t *) ptep;
	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
		pmd_val(pmd) |= *(unsigned long *) origin;
	}
	return __pmd_to_pte(pmd);
}

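/*
 * Clear a huge pte: read the old value, flush the segment entry from the
 * TLB (IDTE if available, CSP otherwise) and mark the entry empty.
 */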
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;
	pte_t pte = huge_ptep_get(ptep);

	if (MACHINE_HAS_IDTE)
		__pmd_idte(addr, pmdp);
	else
		__pmd_csp(pmdp);
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	return pte;
}

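/*
 * Without hardware large pages a huge page is emulated by a fully
 * populated pte table covering the page. The table is allocated here
 * and remembered in page[1].index for set_huge_pte_at().
 */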
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	pte_val(pte) = addr;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;
	}
	page[1].index = (unsigned long) ptep;
	return 0;
}

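/* Free the pte table that arch_prepare_hugepage() attached to the page. */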
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	clear_table((unsigned long *) ptep, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;
}

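/*
 * Huge pages are mapped at segment (pmd) level, so allocating a huge pte
 * means walking down to, and if necessary allocating, the pmd entry.
 */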
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	pudp = pud_alloc(mm, pgdp, addr);
	if (pudp)
		pmdp = pmd_alloc(mm, pudp, addr);
	return (pte_t *) pmdp;
}

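/* Look up the pmd entry for a huge page mapping, without allocating. */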
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		pudp = pud_offset(pgdp, addr);
		if (pud_present(*pudp))
			pmdp = pmd_offset(pudp, addr);
	}
	return (pte_t *) pmdp;
}

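/* Huge pmd sharing is not used on s390. */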
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

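/* follow_huge_addr() is not implemented on s390. */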
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

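/* A pmd maps a huge page if the large-page bit is set in the segment entry. */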
int pmd_huge(pmd_t pmd)
{
	if (!MACHINE_HAS_HPAGE)
		return 0;

	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
}

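/* s390 has no huge pages at pud level. */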
int pud_huge(pud_t pud)
{
	return 0;
}

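/* Return the page struct for the page within the huge pmd mapping address. */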
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmdp, int write)
{
	struct page *page;

	if (!MACHINE_HAS_HPAGE)
		return NULL;

	page = pmd_page(*pmdp);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}