blob: e47ed1c5dce1bbe50c22094b17cc12e82dde6cdf [file] [log] [blame]
/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/module.h>
16#include <linux/sched.h>
17
18#include <asm/pgtable.h>
19#include <asm/tlbflush.h>
20
/*
 * Cookie passed to change_page_range() via apply_to_page_range():
 * the pte attribute bits to set and to clear on every page in the
 * range being changed.
 */
struct page_change_data {
	pgprot_t set_mask;	/* pte bits to OR in */
	pgprot_t clear_mask;	/* pte bits to mask out */
};
25
26static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
27 void *data)
28{
29 struct page_change_data *cdata = data;
30 pte_t pte = *ptep;
31
32 pte = clear_pte_bit(pte, cdata->clear_mask);
33 pte = set_pte_bit(pte, cdata->set_mask);
34
35 set_pte(ptep, pte);
36 return 0;
37}
38
39static int change_memory_common(unsigned long addr, int numpages,
40 pgprot_t set_mask, pgprot_t clear_mask)
41{
42 unsigned long start = addr;
43 unsigned long size = PAGE_SIZE*numpages;
44 unsigned long end = start + size;
45 int ret;
46 struct page_change_data data;
47
48 if (!IS_ALIGNED(addr, PAGE_SIZE)) {
Laura Abbottb4da1842014-09-11 23:10:32 +010049 start &= PAGE_MASK;
50 end = start + size;
Laura Abbott11d91a72014-08-19 20:41:43 +010051 WARN_ON_ONCE(1);
52 }
53
Laura Abbott8b5f5a02015-02-25 14:14:55 -080054 if (start < MODULES_VADDR || start >= MODULES_END)
55 return -EINVAL;
56
57 if (end < MODULES_VADDR || end >= MODULES_END)
Laura Abbott11d91a72014-08-19 20:41:43 +010058 return -EINVAL;
59
60 data.set_mask = set_mask;
61 data.clear_mask = clear_mask;
62
63 ret = apply_to_page_range(&init_mm, start, size, change_page_range,
64 &data);
65
66 flush_tlb_kernel_range(start, end);
67 return ret;
68}
69
70int set_memory_ro(unsigned long addr, int numpages)
71{
72 return change_memory_common(addr, numpages,
73 __pgprot(PTE_RDONLY),
74 __pgprot(PTE_WRITE));
75}
Laura Abbott11d91a72014-08-19 20:41:43 +010076
77int set_memory_rw(unsigned long addr, int numpages)
78{
79 return change_memory_common(addr, numpages,
80 __pgprot(PTE_WRITE),
81 __pgprot(PTE_RDONLY));
82}
Laura Abbott11d91a72014-08-19 20:41:43 +010083
84int set_memory_nx(unsigned long addr, int numpages)
85{
86 return change_memory_common(addr, numpages,
87 __pgprot(PTE_PXN),
88 __pgprot(0));
89}
90EXPORT_SYMBOL_GPL(set_memory_nx);
91
92int set_memory_x(unsigned long addr, int numpages)
93{
94 return change_memory_common(addr, numpages,
95 __pgprot(0),
96 __pgprot(PTE_PXN));
97}
98EXPORT_SYMBOL_GPL(set_memory_x);