/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

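/*
 * Attribute masks applied to every PTE in the range: bits in clear_mask
 * are cleared first, then bits in set_mask are set.
 */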
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

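/*
 * Per-PTE callback for apply_to_page_range(): rewrite one kernel PTE with
 * cdata->clear_mask cleared and cdata->set_mask set.
 */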
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

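/*
 * Common helper: page-align the request, check that it is covered by a
 * single vmalloc/vmap area, and then update the attributes of every page
 * in the range.
 */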
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

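	/*
	 * Callers are expected to pass a page-aligned address; tolerate an
	 * unaligned one by rounding down to the page boundary, but warn.
	 */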
	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
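	/*
	 * For example, a linear-map address (such as one returned by
	 * kmalloc()) has no backing vm_struct, so find_vm_area() returns
	 * NULL and the request is rejected below.
	 */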
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

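/*
 * The set_memory_* helpers below simply pair up the pgprot bits to set and
 * clear: read-only means PTE_RDONLY set and PTE_WRITE cleared (and vice
 * versa for read-write), while executability is controlled via PTE_PXN.
 */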
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

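/*
 * Illustrative usage sketch (not part of this file): a caller wanting to
 * write-protect a page-aligned, vmalloc'd buffer could do something like:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	set_memory_ro((unsigned long)buf, 1);
 *	...
 *	set_memory_rw((unsigned long)buf, 1);
 *	vfree(buf);
 *
 * The buffer passes the VM_ALLOC check above because it was created by
 * vmalloc().
 */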
#ifdef CONFIG_DEBUG_PAGEALLOC
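/*
 * With CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the linear map when
 * freed (PTE_VALID cleared) and re-mapped on allocation (PTE_VALID set), so
 * that stray accesses to freed pages fault immediately.
 */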
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (enable)
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */