/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>

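/*
 * Layout note (as used by the lookups below; the SBP_* geometry is
 * defined in the 64k pagetable headers): protection map pages for
 * addresses below 4GB are reached directly through low_prot[], while
 * higher addresses go through protptrs[] (indexed by SBP_L3_SHIFT) to
 * a page of intermediate pointers (indexed by SBP_L2_SHIFT) to the
 * map pages themselves.  Each map page holds one u32 per 64k page,
 * i.e. 2 bits per 4k subpage.
 */
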
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(pgd_t *pgd)
{
	struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
	unsigned long i, j, addr;
	u32 **p;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}

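/*
 * Walk the page tables for [addr, addr + npages * PAGE_SIZE) and do a
 * dummy pte_update() on each PTE; on the hash MMU this flushes any
 * existing HPTE for the page, so the next access faults and rebuilds
 * the translation with the current subpage protection applied.
 */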
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

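		/*
		 * Subpage protection is enforced per 4k HPTE, so the
		 * segment containing addr has to be demoted from 64k
		 * to 4k hash pages before the map can take effect.
		 * Interrupts are disabled across the demotion.
		 */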
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

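		/*
		 * Drop mmap_sem while touching the user buffer:
		 * __copy_from_user() may fault, and the fault handler
		 * takes mmap_sem itself.
		 */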
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
 out2:
	return err;
}
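
/*
 * Illustrative userspace sketch (not part of this file's kernel code):
 * write-protect all sixteen 4k subpages of one 64k page.  The value
 * 0x55555555 puts 1 in every 2-bit field, so the sketch does not
 * depend on which field maps to which subpage.  Assumes a 64k
 * PAGE_SIZE and a kernel built with CONFIG_PPC_SUBPAGE_PROT;
 * __NR_subpage_prot is the powerpc syscall number for
 * sys_subpage_prot().
 *
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int protect_page(void *page)
 *	{
 *		uint32_t map = 0x55555555;
 *
 *		return syscall(__NR_subpage_prot, (unsigned long)page,
 *			       0x10000UL, &map);
 *	}
 *
 * Passing a NULL map for the same range clears the protection again,
 * per the !map case above.
 */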