/*
 * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
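/*
 * Change the protection attributes (read-only, writable, executable) of
 * kernel page table entries at page granularity.
 */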
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

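/* apply_to_page_range() callback: rewrite the protection bits of one PTE. */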
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte_ext(ptep, pte, 0);
	return 0;
}

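/*
 * Apply set_mask/clear_mask to every PTE in the range
 * [addr, addr + numpages * PAGE_SIZE) and flush the TLB for that range.
 */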
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE*numpages;
	unsigned long end = start + size;
	int ret;
	struct page_change_data data;

	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	if (!numpages)
		return 0;

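	/*
	 * Unless CONFIG_FORCE_PAGES is enabled, only the module area
	 * (MODULES_VADDR..MODULES_END) may have its attributes changed;
	 * presumably the rest of the kernel mapping is not guaranteed to
	 * be mapped at page granularity.
	 */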
	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) {
		if (start < MODULES_VADDR || start >= MODULES_END)
			return -EINVAL;

		if (end < MODULES_VADDR || end >= MODULES_END)
			return -EINVAL;
	}

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, end);
	return ret;
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_RDONLY),
					__pgprot(0));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(L_PTE_XN),
					__pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(L_PTE_XN));
}

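/*
 * debug_pagealloc hook: rather than unmapping freed pages, mark them
 * read-only so that stray writes fault. Highmem pages have no permanent
 * kernel mapping and are skipped.
 */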
#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long) page_address(page);
	if (enable)
		set_memory_rw(addr, numpages);
	else
		set_memory_ro(addr, numpages);
}
#endif