/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * PTE attribute bits to set and to clear; applied to each page in the
 * range by change_page_range().
 */
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

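/*
 * A sketch of how the callback above is driven (illustrative only):
 * apply_to_page_range() walks init_mm's page tables and invokes
 * change_page_range() once per PTE in [start, start + size), e.g.
 *
 *	struct page_change_data data = {
 *		.set_mask   = __pgprot(PTE_RDONLY),
 *		.clear_mask = __pgprot(PTE_WRITE),
 *	};
 *
 *	apply_to_page_range(&init_mm, start, size, change_page_range, &data);
 */
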
/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	/* Flush stale TLB entries carrying the old attributes. */
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

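/*
 * Illustrative sketch (hypothetical caller): because of the
 * find_vm_area() check above, the set_memory_* helpers only accept a
 * range that lies entirely within a single vmalloc/vmap area:
 *
 *	void *p = vmalloc(2 * PAGE_SIZE);
 *	void *q = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *
 *	set_memory_ro((unsigned long)p, 2);	// 0: one VM_ALLOC area covers it
 *	set_memory_ro((unsigned long)q, 1);	// -EINVAL: linear map address,
 *						// no VM_ALLOC area covers it
 */
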
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

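/*
 * Typical usage (a sketch; 'buf' is hypothetical): protect a vmalloc'ed
 * buffer after initialising it, and make it writable again on update:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	// ... fill in buf ...
 *	set_memory_ro((unsigned long)buf, 1);	// stray writes now fault
 *	...
 *	set_memory_rw((unsigned long)buf, 1);	// writable again
 *
 * Note that read-only is expressed with both masks: set_memory_ro()
 * sets PTE_RDONLY and clears PTE_WRITE, and set_memory_rw() does the
 * reverse.
 */
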
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

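/*
 * Sketch of the usual pairing (hypothetical caller): memory holding
 * run-time generated code is written first, then flipped to read-only
 * and executable so it is never writable and executable at once:
 *
 *	void *code = vmalloc(PAGE_SIZE);
 *
 *	// ... emit instructions into 'code' ...
 *	set_memory_ro((unsigned long)code, 1);
 *	set_memory_x((unsigned long)code, 1);	// clears PTE_PXN
 *
 * Only the privileged-execute-never bit (PTE_PXN) is toggled here; the
 * user-execute (UXN) bit is left alone.
 */
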
/*
 * Unlike the helpers above, this calls __change_memory_common()
 * directly and so skips the VM_ALLOC check; callers may operate on
 * linear map pages, provided the range is already mapped at page
 * granularity.
 */
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
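
/*
 * With debug_pagealloc active, the page allocator invokes the hook
 * above (via kernel_map_pages()) around every allocation and free,
 * roughly:
 *
 *	__kernel_map_pages(page, 1 << order, 1);	// page allocated
 *	__kernel_map_pages(page, 1 << order, 0);	// page freed
 *
 * Freed pages thus disappear from the linear map, turning any
 * use-after-free access into an immediate fault.
 */
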
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr = (unsigned long)page_address(page);

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return false;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return false;
	if (pud_sect(*pud))
		return true;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;
	if (pmd_sect(*pmd))
		return true;

	pte = pte_offset_kernel(pmd, addr);
	return pte_valid(*pte);
}
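
/*
 * Hibernation uses the result along these lines (paraphrased from
 * kernel/power/snapshot.c): pages that are not present are briefly
 * mapped with kernel_map_pages(page, 1, 1), copied into the image,
 * and unmapped again afterwards.
 */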
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */