/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

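/*
 * Carries the attribute bits to set and to clear on each PTE in a range;
 * handed through apply_to_page_range() to the per-PTE callback below.
 */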
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

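/*
 * Per-PTE callback invoked by apply_to_page_range(): clears, then sets,
 * the requested attribute bits on a single page table entry.
 */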
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = *ptep;

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}
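/*
 * Usage sketch (illustrative only, mirroring set_memory_ro() below):
 * make a single page-mapped kernel page read-only:
 *
 *	__change_memory_common(addr, PAGE_SIZE,
 *			       __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE));
 *
 * Note that the TLB is flushed for the whole range even if
 * apply_to_page_range() failed partway through, so no stale translations
 * with the old attributes remain cached.
 */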

static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

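/*
 * The helpers below implement the generic set_memory_*() interface for
 * arm64. Read-only versus read-write is controlled via the PTE_RDONLY
 * and PTE_WRITE bits; executability via PTE_PXN (Privileged eXecute
 * Never). set_memory_nx() and set_memory_x() are exported for GPL
 * modules.
 */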
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}
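/*
 * Example (illustrative sketch; 'buf' is a hypothetical caller's buffer,
 * not part of this file): write-protect a vmalloc'ed page once it has
 * been initialised:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	...fill in buf...
 *	set_memory_ro((unsigned long)buf, 1);
 *
 * The vmalloc requirement comes from the VM_ALLOC check in
 * change_memory_common() above.
 */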

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

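/*
 * With CONFIG_DEBUG_PAGEALLOC, the page allocator maps and unmaps pages
 * as they are allocated and freed, by toggling PTE_VALID on the linear
 * map, so that stale accesses to freed pages fault immediately. This
 * relies on the range being mapped at page granularity, per the
 * assumption documented on __change_memory_common().
 */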
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (enable)
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		__change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}
#endif