/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

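/* Reject lengths that are not a multiple of the huge page size,
 * huge-page-align any address hint, and then defer to the regular
 * arch_get_unmapped_area() so cache colouring constraints are respected.
 */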
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}

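/* Allocate the pgd/pud/pmd chain down to the pte level and hand back the
 * pte for the first sub-page of the huge page; the huge mapping itself
 * lives entirely at the pte level on parisc.
 */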
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

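/* Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables and return NULL if any level of the huge mapping is absent.
 */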
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
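/* Example: with 1 MB physical huge pages emulating a 2 MB Linux huge
 * page (the case mentioned below), HPAGE_SHIFT - REAL_HPAGE_SHIFT == 1,
 * so the loop issues two purges, one per physical huge page.
 */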
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

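/* A huge page is backed by (1 << HUGETLB_PAGE_ORDER) contiguous base-page
 * ptes, so setting one means writing every sub-pte, advancing the physical
 * address in the pte value by PAGE_SIZE for each step.
 */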
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

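/* Locked wrapper around __set_huge_pte_at(): purge_tlb_start() and
 * purge_tlb_end() bracket the update so it runs with the pa_tlb_lock held.
 */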
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}

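/* Read the old pte and clear all of its sub-ptes under the TLB lock, so
 * the returned value and the clearing cannot race with another update.
 */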
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}

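/* Rewrite every sub-pte with the write bit cleared; the TLB lock keeps
 * the per-page updates and the subsequent purge together.
 */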
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

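/* Install updated access flags (e.g. dirty/accessed) if the new pte
 * differs from the current one; returns whether anything changed.
 */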
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}

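/* parisc maps huge pages as runs of base-page ptes, never as a single
 * huge pmd or pud entry, so these always report false.
 */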
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}