/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>
#include <asm/set_memory.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
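/*
 * vaddr/pages (indexed by curpage, with numpages remaining) describe
 * what to operate on: a linear range, an array of addresses (CPA_ARRAY)
 * or an array of pages (CPA_PAGES_ARRAY).  mask_set/mask_clr carry the
 * pgprot bits to set and clear, pgd optionally points at a page table
 * other than init_mm's, and force_split forces large pages to be split.
 */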
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
	struct page	**pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity
 * mappings) using cpa_lock, so that we don't allow any other CPU with
 * stale large TLB entries to change the page attributes in parallel
 * with some other CPU splitting a large page entry and changing the
 * attributes at the same time.
 */
static DEFINE_SPINLOCK(cpa_lock);

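/*
 * CPA_FLUSHTLB: a mapping was changed, so TLBs (and possibly caches)
 * must be flushed before returning.
 * CPA_ARRAY: cpa->vaddr points to an array of addresses rather than a
 * contiguous range.
 * CPA_PAGES_ARRAY: cpa->pages points to an array of struct page
 * pointers instead.
 */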
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

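/*
 * Splitting one large page yields PTRS_PER_PTE pages of the next
 * smaller level, so move the accounting down one level accordingly.
 */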
static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

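/*
 * The counters hold numbers of pages and are reported in kB:
 * a 4k page is 4 kB (<< 2), a 2M page 2048 kB (<< 11), a 4M page
 * 4096 kB (<< 12) and a 1G page 1048576 kB (<< 20).
 */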
void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

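/*
 * On 64-bit the kernel image has a second, high alias at
 * __START_KERNEL_map; these helpers bound the pfns covered by that
 * mapping, from _text to the end of the brk area rounded up to a PMD
 * boundary.
 */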
static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	mb();

	for (; p < vend; p += clflush_size)
		clflushopt(p);

	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
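
/*
 * Usage sketch (hypothetical buffer name): flush the cache lines
 * backing a buffer after CPU writes, so that a non-snooping observer
 * sees the data:
 *
 *	memcpy(shared_buf, data, len);
 *	clflush_cache_range(shared_buf, len);
 */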

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;
#ifdef CONFIG_PREEMPT
	/*
	 * Avoid wbinvd() because it causes latencies on all CPUs,
	 * regardless of any CPU isolation that may be in effect.
	 *
	 * This should be extended for CAT enabled systems independent of
	 * PREEMPT because wbinvd() does not respect the CAT partitions and
	 * this is exposed to unprivileged users through the graphics
	 * subsystem.
	 */
	unsigned long do_wbinvd = 0;
#else
	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
#endif

	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

	if (!cache || do_wbinvd)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
				   unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
#ifdef CONFIG_PCI_BIOS
	if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;
#endif

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext, since that is gone later on. On
	 * 64-bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64)
	/*
	 * Once the kernel maps the text as RO (kernel_set_to_readonly is
	 * set), the kernel text mappings for the large-page-aligned text
	 * and rodata sections will always be read-only. The kernel
	 * identity mappings covering the holes caused by this alignment
	 * can be anything the caller asks for.
	 *
	 * This preserves the large page mappings for kernel text/data
	 * at no extra cost.
	 */
	if (kernel_set_to_readonly &&
	    within(address, (unsigned long)_text,
		   (unsigned long)__end_rodata_hpage_align)) {
		unsigned int level;

		/*
		 * Don't enforce the !RW mapping for the kernel text
		 * mapping if the current mapping is already using a
		 * small page mapping. There is no need to work hard to
		 * preserve large page mappings in this case.
		 *
		 * This also fixes the Linux Xen paravirt guest boot
		 * failure caused by unexpected read-only mappings for
		 * kernel identity mappings. In this paravirt guest
		 * case, the kernel text mapping and the kernel identity
		 * mapping share the same page-table pages, so we cannot
		 * use different protections for the kernel text and
		 * identity mappings. Also, these shared mappings are
		 * made of small page mappings. Thus, not enforcing the
		 * !RW mapping for small page kernel text mappings helps
		 * Linux Xen paravirt guests boot as well.
		 */
		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
			pgprot_val(forbidden) |= _PAGE_RW;
	}
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

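/*
 * Example: a request to make a .rodata page writable ends up with
 * _PAGE_RW in 'forbidden' above, so the RW bit is masked out and the
 * page stays read-only.
 */
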
/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

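/*
 * Typical lookup pattern, as used by the flush routines above:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		... the mapping is present, at 4k/2M/1G 'level' ...
 */
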
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
				  unsigned int *level)
{
	if (cpa->pgd)
		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
					     address, level);

	return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * or NULL if not present.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
		return NULL;

	return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-bit PAE kernels work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

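/*
 * Check whether the requested pgprot change can be applied to the
 * whole large page, so the (expensive, fragmenting) split can be
 * avoided.  Returns 1 if the page has to be split, 0 if it could be
 * preserved and a negative error code on failure.
 */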
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot, req_prot;
	int i, do_split = 1;
	enum pg_level level;

	if (cpa->force_split)
		return 1;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		old_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		old_prot = pud_pgprot(*(pud_t *)kpte);
		old_pfn = pud_pfn(*(pud_t *)kpte);
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	psize = page_level_size(level);
	pmask = page_level_mask(level);

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 * Convert protection attributes to 4k-format, as cpa->mask* are set
	 * up accordingly.
	 */
	old_pte = *kpte;
	req_prot = pgprot_large_2_4k(old_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * req_prot is in format of 4k pages. It must be converted to large
	 * page format: the caching mode includes the PAT bit located at
	 * different bit positions in the two formats.
	 */
	req_prot = pgprot_4k_2_large(req_prot);

	/*
	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
	 * set, otherwise pmd_present/pmd_huge will return true even on
	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
	 * for the ancient hardware that doesn't support it.
	 */
	if (pgprot_val(req_prot) & _PAGE_PRESENT)
		pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
	else
		pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);

	req_prot = canon_pgprot(req_prot);

	/*
	 * old_pfn points to the large page base pfn. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(req_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address & pmask;
	pfn = old_pfn;
	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(old_pfn, new_prot);
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
	}

out_unlock:
	spin_unlock(&pgd_lock);

	return do_split;
}

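/*
 * Replace the large (2M or 1G) mapping covering @address with a page
 * table one level down, built in the preallocated page @base, with
 * each new entry inheriting the old protections.
 */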
static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
		   struct page *base)
{
	pte_t *pbase = (pte_t *)page_address(base);
	unsigned long ref_pfn, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *tmp;
	pgprot_t ref_prot;

	spin_lock(&pgd_lock);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;
	}

	paravirt_alloc_pte(&init_mm, page_to_pfn(base));

	switch (level) {
	case PG_LEVEL_2M:
		ref_prot = pmd_pgprot(*(pmd_t *)kpte);
		/* clear PSE and promote PAT bit to correct position */
		ref_prot = pgprot_large_2_4k(ref_prot);
		ref_pfn = pmd_pfn(*(pmd_t *)kpte);
		break;

	case PG_LEVEL_1G:
		ref_prot = pud_pgprot(*(pud_t *)kpte);
		ref_pfn = pud_pfn(*(pud_t *)kpte);
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;

		/*
		 * Clear the PSE flags if the PRESENT flag is not set,
		 * otherwise pmd_present/pmd_huge will return true
		 * even on a non present pmd.
		 */
		if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
			pgprot_val(ref_prot) &= ~_PAGE_PSE;
		break;

	default:
		spin_unlock(&pgd_lock);
		return 1;
	}

	/*
	 * Set the GLOBAL flags only if the PRESENT flag is set,
	 * otherwise pmd/pte_present will return true even on a non
	 * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
	 * for the ancient hardware that doesn't support it.
	 */
	if (pgprot_val(ref_prot) & _PAGE_PRESENT)
		pgprot_val(ref_prot) |= _PAGE_GLOBAL;
	else
		pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = ref_pfn;
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));

	if (virt_addr_valid(address)) {
		unsigned long pfn = PFN_DOWN(__pa(address));

		if (pfn_range_is_mapped(pfn, pfn + 1))
			split_page_count(level);
	}

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Intel Atom errata AAH41 workaround.
	 *
	 * The real fix should be in hw or in a microcode update, but
	 * we also probabilistically try to reduce the window of having
	 * a large TLB mixed with 4K TLBs while instruction fetches are
	 * going on.
	 */
	__flush_tlb_all();
	spin_unlock(&pgd_lock);

	return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
			    unsigned long address)
{
	struct page *base;

	if (!debug_pagealloc_enabled())
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
	if (!debug_pagealloc_enabled())
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	if (__split_large_page(cpa, kpte, address, base))
		__free_page(base);

	return 0;
}

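/*
 * The unmap_*_range() helpers below tear down ranges mapped by the
 * populate_*() functions further down (e.g. on populate_pgd's error
 * path), freeing page-table pages that end up entirely empty.
 */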
static bool try_to_free_pte_page(pte_t *pte)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++)
		if (!pte_none(pte[i]))
			return false;

	free_page((unsigned long)pte);
	return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_none(pmd[i]))
			return false;

	free_page((unsigned long)pmd);
	return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);

	while (start < end) {
		set_pte(pte, __pte(0));

		start += PAGE_SIZE;
		pte++;
	}

	if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
		pmd_clear(pmd);
		return true;
	}
	return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (unmap_pte_range(pmd, start, end))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if we haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
	pud_t *pud = pud_offset(p4d, start);

	/*
	 * Not on a GB page boundary?
	 */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		unmap_pmd_range(pud, start, pre_end);

		start = pre_end;
		pud++;
	}

	/*
	 * Try to unmap in 1G chunks.
	 */
	while (end - start >= PUD_SIZE) {

		if (pud_large(*pud))
			pud_clear(pud);
		else
			unmap_pmd_range(pud, start, start + PUD_SIZE);

		start += PUD_SIZE;
		pud++;
	}

	/*
	 * 2M leftovers?
	 */
	if (start < end)
		unmap_pmd_range(pud, start, end);

	/*
	 * No need to try to free the PUD page because we'll free it in
	 * populate_pgd's error path.
	 */
}

static int alloc_pte_page(pmd_t *pmd)
{
	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
	if (!pte)
		return -1;

	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
	if (!pmd)
		return -1;

	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	return 0;
}

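/*
 * The populate_*() helpers below build mappings for [start, end) one
 * level at a time, taking the physical side from cpa->pfn (which is
 * advanced as entries are installed) and using large pages where
 * alignment and size allow.
 */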
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 919 | static void populate_pte(struct cpa_data *cpa, |
| 920 | unsigned long start, unsigned long end, |
| 921 | unsigned num_pages, pmd_t *pmd, pgprot_t pgprot) |
| 922 | { |
| 923 | pte_t *pte; |
| 924 | |
| 925 | pte = pte_offset_kernel(pmd, start); |
| 926 | |
Sai Praneeth | 397630150 | 2016-02-17 12:35:56 +0000 | [diff] [blame] | 927 | /* |
| 928 | * Set the GLOBAL flags only if the PRESENT flag is |
| 929 | * set otherwise pte_present will return true even on |
| 930 | * a non present pte. The canon_pgprot will clear |
| 931 | * _PAGE_GLOBAL for the ancient hardware that doesn't |
| 932 | * support it. |
| 933 | */ |
| 934 | if (pgprot_val(pgprot) & _PAGE_PRESENT) |
| 935 | pgprot_val(pgprot) |= _PAGE_GLOBAL; |
| 936 | else |
| 937 | pgprot_val(pgprot) &= ~_PAGE_GLOBAL; |
| 938 | |
| 939 | pgprot = canon_pgprot(pgprot); |
| 940 | |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 941 | while (num_pages-- && start < end) { |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 942 | set_pte(pte, pfn_pte(cpa->pfn, pgprot)); |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 943 | |
| 944 | start += PAGE_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 945 | cpa->pfn++; |
Borislav Petkov | c6b6f36 | 2013-10-31 17:25:04 +0100 | [diff] [blame] | 946 | pte++; |
| 947 | } |
| 948 | } |
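| | /* |
| | * A worked pass through populate_pte() (illustrative, addresses made |
| | * up): for start = 0xffffc90000001000, num_pages = 3 and |
| | * cpa->pfn = 0x1000, three consecutive PTEs are written with pfns |
| | * 0x1000..0x1002 and cpa->pfn is left at 0x1003, ready for the |
| | * caller's next chunk. |
| | */ |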
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 949 | |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 950 | static long populate_pmd(struct cpa_data *cpa, |
| 951 | unsigned long start, unsigned long end, |
| 952 | unsigned num_pages, pud_t *pud, pgprot_t pgprot) |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 953 | { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 954 | long cur_pages = 0; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 955 | pmd_t *pmd; |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 956 | pgprot_t pmd_pgprot; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 957 | |
| 958 | /* |
| 959 | * Not on a 2M boundary? |
| 960 | */ |
| 961 | if (start & (PMD_SIZE - 1)) { |
| 962 | unsigned long pre_end = start + (num_pages << PAGE_SHIFT); |
| 963 | unsigned long next_page = (start + PMD_SIZE) & PMD_MASK; |
| 964 | |
| 965 | pre_end = min_t(unsigned long, pre_end, next_page); |
| 966 | cur_pages = (pre_end - start) >> PAGE_SHIFT; |
| 967 | cur_pages = min_t(unsigned int, num_pages, cur_pages); |
| 968 | |
| 969 | /* |
| 970 | * Need a PTE page? |
| 971 | */ |
| 972 | pmd = pmd_offset(pud, start); |
| 973 | if (pmd_none(*pmd)) |
| 974 | if (alloc_pte_page(pmd)) |
| 975 | return -1; |
| 976 | |
| 977 | populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot); |
| 978 | |
| 979 | start = pre_end; |
| 980 | } |
| 981 | |
| 982 | /* |
| 983 | * We mapped them all? |
| 984 | */ |
| 985 | if (num_pages == cur_pages) |
| 986 | return cur_pages; |
| 987 | |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 988 | pmd_pgprot = pgprot_4k_2_large(pgprot); |
| 989 | |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 990 | while (end - start >= PMD_SIZE) { |
| 991 | |
| 992 | /* |
| 993 | * We cannot use a 1G page so allocate a PMD page if needed. |
| 994 | */ |
| 995 | if (pud_none(*pud)) |
| 996 | if (alloc_pmd_page(pud)) |
| 997 | return -1; |
| 998 | |
| 999 | pmd = pmd_offset(pud, start); |
| 1000 | |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1001 | set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1002 | massage_pgprot(pmd_pgprot))); |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1003 | |
| 1004 | start += PMD_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1005 | cpa->pfn += PMD_SIZE >> PAGE_SHIFT; |
Borislav Petkov | f900a4b | 2013-10-31 17:25:03 +0100 | [diff] [blame] | 1006 | cur_pages += PMD_SIZE >> PAGE_SHIFT; |
| 1007 | } |
| 1008 | |
| 1009 | /* |
| 1010 | * Map trailing 4K pages. |
| 1011 | */ |
| 1012 | if (start < end) { |
| 1013 | pmd = pmd_offset(pud, start); |
| 1014 | if (pmd_none(*pmd)) |
| 1015 | if (alloc_pte_page(pmd)) |
| 1016 | return -1; |
| 1017 | |
| 1018 | populate_pte(cpa, start, end, num_pages - cur_pages, |
| 1019 | pmd, pgprot); |
| 1020 | } |
| 1021 | return num_pages; |
| 1022 | } |
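| | /* |
| | * Worked example of the split above (illustrative numbers): for a |
| | * 513-page request starting 4K below a 2M boundary, the head block |
| | * maps a single 4K page up to the boundary via populate_pte(), the |
| | * while loop then covers the remaining 2M with one PMD entry, and |
| | * no 4K tail is left over. |
| | */ |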
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1023 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1024 | static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d, |
| 1025 | pgprot_t pgprot) |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1026 | { |
| 1027 | pud_t *pud; |
| 1028 | unsigned long end; |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1029 | long cur_pages = 0; |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1030 | pgprot_t pud_pgprot; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1031 | |
| 1032 | end = start + (cpa->numpages << PAGE_SHIFT); |
| 1033 | |
| 1034 | /* |
| 1035 | * Not on a Gb page boundary? => map everything up to it with |
| 1036 | * smaller pages. |
| 1037 | */ |
| 1038 | if (start & (PUD_SIZE - 1)) { |
| 1039 | unsigned long pre_end; |
| 1040 | unsigned long next_page = (start + PUD_SIZE) & PUD_MASK; |
| 1041 | |
| 1042 | pre_end = min_t(unsigned long, end, next_page); |
| 1043 | cur_pages = (pre_end - start) >> PAGE_SHIFT; |
| 1044 | cur_pages = min_t(int, (int)cpa->numpages, cur_pages); |
| 1045 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1046 | pud = pud_offset(p4d, start); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1047 | |
| 1048 | /* |
| 1049 | * Need a PMD page? |
| 1050 | */ |
| 1051 | if (pud_none(*pud)) |
| 1052 | if (alloc_pmd_page(pud)) |
| 1053 | return -1; |
| 1054 | |
| 1055 | cur_pages = populate_pmd(cpa, start, pre_end, cur_pages, |
| 1056 | pud, pgprot); |
| 1057 | if (cur_pages < 0) |
| 1058 | return cur_pages; |
| 1059 | |
| 1060 | start = pre_end; |
| 1061 | } |
| 1062 | |
| 1063 | /* We mapped them all? */ |
| 1064 | if (cpa->numpages == cur_pages) |
| 1065 | return cur_pages; |
| 1066 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1067 | pud = pud_offset(p4d, start); |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1068 | pud_pgprot = pgprot_4k_2_large(pgprot); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1069 | |
| 1070 | /* |
| 1071 | * Map everything starting from the Gb boundary, possibly with 1G pages |
| 1072 | */ |
Borislav Petkov | b8291adc | 2016-03-29 17:41:58 +0200 | [diff] [blame] | 1073 | while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1074 | set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE | |
Juergen Gross | f5b2831 | 2014-11-03 14:02:02 +0100 | [diff] [blame] | 1075 | massage_pgprot(pud_pgprot))); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1076 | |
| 1077 | start += PUD_SIZE; |
Matt Fleming | edc3b91 | 2015-11-27 21:09:31 +0000 | [diff] [blame] | 1078 | cpa->pfn += PUD_SIZE >> PAGE_SHIFT; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1079 | cur_pages += PUD_SIZE >> PAGE_SHIFT; |
| 1080 | pud++; |
| 1081 | } |
| 1082 | |
| 1083 | /* Map trailing leftover */ |
| 1084 | if (start < end) { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1085 | long tmp; |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1086 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1087 | pud = pud_offset(p4d, start); |
Borislav Petkov | 4b23538 | 2013-10-31 17:25:02 +0100 | [diff] [blame] | 1088 | if (pud_none(*pud)) |
| 1089 | if (alloc_pmd_page(pud)) |
| 1090 | return -1; |
| 1091 | |
| 1092 | tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, |
| 1093 | pud, pgprot); |
| 1094 | if (tmp < 0) |
| 1095 | return cur_pages; |
| 1096 | |
| 1097 | cur_pages += tmp; |
| 1098 | } |
| 1099 | return cur_pages; |
| 1100 | } |
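| | /* |
| | * Note on the loop above: when the CPU lacks X86_FEATURE_GBPAGES, |
| | * the while loop is skipped entirely and the whole remaining range |
| | * is handed to populate_pmd(), which maps it with 2M (and, where |
| | * needed, 4K) entries instead of 1G pages. |
| | */ |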
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1101 | |
| 1102 | /* |
| 1103 | * Restrictions for the kernel page table do not necessarily apply |
| 1104 | * when mapping into an alternate PGD. |
| 1105 | */ |
| 1106 | static int populate_pgd(struct cpa_data *cpa, unsigned long addr) |
| 1107 | { |
| 1108 | pgprot_t pgprot = __pgprot(_KERNPG_TABLE); |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1109 | pud_t *pud = NULL; /* shut up gcc */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1110 | p4d_t *p4d; |
Borislav Petkov | 42a5477 | 2014-01-18 12:48:16 +0100 | [diff] [blame] | 1111 | pgd_t *pgd_entry; |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1112 | long ret; |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1113 | |
| 1114 | pgd_entry = cpa->pgd + pgd_index(addr); |
| 1115 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1116 | if (pgd_none(*pgd_entry)) { |
| 1117 | p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); |
| 1118 | if (!p4d) |
| 1119 | return -1; |
| 1120 | |
| 1121 | set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE)); |
| 1122 | } |
| 1123 | |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1124 | /* |
| 1125 | * Allocate a PUD page and hand it down for mapping. |
| 1126 | */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1127 | p4d = p4d_offset(pgd_entry, addr); |
| 1128 | if (p4d_none(*p4d)) { |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1129 | pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK); |
| 1130 | if (!pud) |
| 1131 | return -1; |
Andy Lutomirski | 530dd8d | 2016-07-22 21:58:08 -0700 | [diff] [blame] | 1132 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1133 | set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE)); |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1134 | } |
| 1135 | |
| 1136 | pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); |
| 1137 | pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); |
| 1138 | |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1139 | ret = populate_pud(cpa, addr, p4d, pgprot); |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1140 | if (ret < 0) { |
Andy Lutomirski | 55920d3 | 2016-07-23 09:59:28 -0700 | [diff] [blame] | 1141 | /* |
| 1142 | * Leave the PUD page in place in case some other CPU or thread |
| 1143 | * already found it, but remove any useless entries we just |
| 1144 | * added to it. |
| 1145 | */ |
Kirill A. Shutemov | 4547833 | 2017-03-17 21:55:12 +0300 | [diff] [blame] | 1146 | unmap_pud_range(p4d, addr, |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1147 | addr + (cpa->numpages << PAGE_SHIFT)); |
Borislav Petkov | 0bb8aee | 2013-10-31 17:25:05 +0100 | [diff] [blame] | 1148 | return ret; |
| 1149 | } |
Borislav Petkov | 42a5477 | 2014-01-18 12:48:16 +0100 | [diff] [blame] | 1150 | |
Borislav Petkov | f3f7296 | 2013-10-31 17:25:01 +0100 | [diff] [blame] | 1151 | cpa->numpages = ret; |
| 1152 | return 0; |
| 1153 | } |
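| | /* |
| | * On success, cpa->numpages is trimmed to what populate_pud() |
| | * actually mapped in this pass; __change_page_attr_set_clr() uses |
| | * that count to advance and re-enter for any remainder. |
| | */ |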
| 1154 | |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1155 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, |
| 1156 | int primary) |
| 1157 | { |
Matt Fleming | 7fc8442 | 2016-04-25 21:06:35 +0100 | [diff] [blame] | 1158 | if (cpa->pgd) { |
| 1159 | /* |
| 1160 | * Right now, we only execute this code path when mapping |
| 1161 | * the EFI virtual memory map regions, no other users |
| 1162 | * provide a ->pgd value. This may change in the future. |
| 1163 | */ |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1164 | return populate_pgd(cpa, vaddr); |
Matt Fleming | 7fc8442 | 2016-04-25 21:06:35 +0100 | [diff] [blame] | 1165 | } |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1166 | |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1167 | /* |
| 1168 | * Ignore all non-primary paths. |
| 1169 | */ |
Jan Beulich | 405e1133 | 2016-02-10 02:03:00 -0700 | [diff] [blame] | 1170 | if (!primary) { |
| 1171 | cpa->numpages = 1; |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1172 | return 0; |
Jan Beulich | 405e1133 | 2016-02-10 02:03:00 -0700 | [diff] [blame] | 1173 | } |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1174 | |
| 1175 | /* |
| 1176 | * Ignore the NULL PTE for kernel identity mapping, as it is expected |
| 1177 | * to have holes. |
| 1178 | * Also set numpages to '1', indicating that we processed the CPA request for |
| 1179 | * one virtual address page and its pfn. TBD: numpages can be set based |
| 1180 | * on the initial value and the level returned by lookup_address(). |
| 1181 | */ |
| 1182 | if (within(vaddr, PAGE_OFFSET, |
| 1183 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) { |
| 1184 | cpa->numpages = 1; |
| 1185 | cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; |
| 1186 | return 0; |
| 1187 | } else { |
| 1188 | WARN(1, KERN_WARNING "CPA: called for zero pte. " |
| 1189 | "vaddr = %lx cpa->vaddr = %lx\n", vaddr, |
| 1190 | *cpa->vaddr); |
| 1191 | |
| 1192 | return -EFAULT; |
| 1193 | } |
| 1194 | } |
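| | /* |
| | * Example (illustrative): a set_memory_*() call on a direct-map |
| | * address whose PTE is none ends up here; since holes in the |
| | * identity mapping are legal, the request is accounted as a single |
| | * page and silently skipped rather than failed. |
| | */ |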
| 1195 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1196 | static int __change_page_attr(struct cpa_data *cpa, int primary) |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1197 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1198 | unsigned long address; |
Harvey Harrison | da7bfc5 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 1199 | int do_split, err; |
| 1200 | unsigned int level; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1201 | pte_t *kpte, old_pte; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1203 | if (cpa->flags & CPA_PAGES_ARRAY) { |
| 1204 | struct page *page = cpa->pages[cpa->curpage]; |
| 1205 | if (unlikely(PageHighMem(page))) |
| 1206 | return 0; |
| 1207 | address = (unsigned long)page_address(page); |
| 1208 | } else if (cpa->flags & CPA_ARRAY) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1209 | address = cpa->vaddr[cpa->curpage]; |
| 1210 | else |
| 1211 | address = *cpa->vaddr; |
Ingo Molnar | 97f99fe | 2008-01-30 13:33:55 +0100 | [diff] [blame] | 1212 | repeat: |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1213 | kpte = _lookup_address_cpa(cpa, address, &level); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | if (!kpte) |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1215 | return __cpa_process_fault(cpa, address, primary); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1216 | |
| 1217 | old_pte = *kpte; |
Dave Hansen | dcb32d9 | 2016-07-07 17:19:15 -0700 | [diff] [blame] | 1218 | if (pte_none(old_pte)) |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1219 | return __cpa_process_fault(cpa, address, primary); |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1220 | |
Thomas Gleixner | 30551bb | 2008-01-30 13:34:04 +0100 | [diff] [blame] | 1221 | if (level == PG_LEVEL_4K) { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1222 | pte_t new_pte; |
Arjan van de Ven | 626c2c9 | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 1223 | pgprot_t new_prot = pte_pgprot(old_pte); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1224 | unsigned long pfn = pte_pfn(old_pte); |
Thomas Gleixner | a72a08a | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1225 | |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1226 | pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); |
| 1227 | pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1228 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1229 | new_prot = static_protections(new_prot, address, pfn); |
Ingo Molnar | 86f0398 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1230 | |
Arjan van de Ven | 626c2c9 | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 1231 | /* |
Andrea Arcangeli | a8aed3e | 2013-02-22 15:11:51 -0800 | [diff] [blame] | 1232 | * Set the GLOBAL flag only if the PRESENT flag is |
| 1233 | * set; otherwise pte_present() will return true even |
| 1234 | * on a non-present pte. canon_pgprot() will clear |
| 1235 | * _PAGE_GLOBAL for ancient hardware that doesn't |
| 1236 | * support it. |
| 1237 | */ |
| 1238 | if (pgprot_val(new_prot) & _PAGE_PRESENT) |
| 1239 | pgprot_val(new_prot) |= _PAGE_GLOBAL; |
| 1240 | else |
| 1241 | pgprot_val(new_prot) &= ~_PAGE_GLOBAL; |
| 1242 | |
| 1243 | /* |
Arjan van de Ven | 626c2c9 | 2008-02-04 16:48:05 +0100 | [diff] [blame] | 1244 | * We need to keep the pfn from the existing PTE; |
| 1245 | * after all, we're only going to change its attributes, |
| 1246 | * not the memory it points to. |
| 1247 | */ |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1248 | new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); |
| 1249 | cpa->pfn = pfn; |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1250 | /* |
| 1251 | * Do we really change anything ? |
| 1252 | */ |
| 1253 | if (pte_val(old_pte) != pte_val(new_pte)) { |
| 1254 | set_pte_atomic(kpte, new_pte); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1255 | cpa->flags |= CPA_FLUSHTLB; |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1256 | } |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1257 | cpa->numpages = 1; |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1258 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | } |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1260 | |
| 1261 | /* |
| 1262 | * Check, whether we can keep the large page intact |
| 1263 | * and just change the pte: |
| 1264 | */ |
Ingo Molnar | beaff63 | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 1265 | do_split = try_preserve_large_page(kpte, address, cpa); |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1266 | /* |
| 1267 | * When the range fits into the existing large page, |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1268 | * return. cpa->numpages and the CPA_FLUSHTLB flag have been |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1269 | * updated in try_preserve_large_page(): |
| 1270 | */ |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1271 | if (do_split <= 0) |
| 1272 | return do_split; |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1273 | |
| 1274 | /* |
| 1275 | * We have to split the large page: |
| 1276 | */ |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1277 | err = split_large_page(cpa, kpte, address); |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1278 | if (!err) { |
Suresh Siddha | ad5ca55 | 2008-09-23 14:00:42 -0700 | [diff] [blame] | 1279 | /* |
| 1280 | * Do a global flush tlb after splitting the large page |
| 1281 | * and before we do the actual change page attribute in the PTE. |
| 1282 | * |
| 1283 | * Without this, we violate the TLB application note, which says |
| 1284 | * "The TLBs may contain both ordinary and large-page |
| 1285 | * translations for a 4-KByte range of linear addresses. This |
| 1286 | * may occur if software modifies the paging structures so that |
| 1287 | * the page size used for the address range changes. If the two |
| 1288 | * translations differ with respect to page frame or attributes |
| 1289 | * (e.g., permissions), processor behavior is undefined and may |
| 1290 | * be implementation-specific." |
| 1291 | * |
| 1292 | * We do this global tlb flush inside the cpa_lock, so that we |
| 1293 | * don't allow any other cpu, with stale tlb entries, to change |
| 1294 | * the page attributes in parallel for an address that also falls |
| 1295 | * into the just split large page entry. |
| 1296 | */ |
| 1297 | flush_tlb_all(); |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1298 | goto repeat; |
| 1299 | } |
Ingo Molnar | beaff63 | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 1300 | |
Ingo Molnar | 87f7f8f | 2008-02-04 16:48:10 +0100 | [diff] [blame] | 1301 | return err; |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1302 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1304 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); |
| 1305 | |
| 1306 | static int cpa_process_alias(struct cpa_data *cpa) |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1307 | { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1308 | struct cpa_data alias_cpa; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1309 | unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); |
Tejun Heo | e933a73 | 2009-08-14 15:00:53 +0900 | [diff] [blame] | 1310 | unsigned long vaddr; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1311 | int ret; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1312 | |
Yinghai Lu | 8eb5779 | 2012-11-16 19:38:49 -0800 | [diff] [blame] | 1313 | if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1)) |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1314 | return 0; |
| 1315 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1316 | /* |
| 1317 | * No need to redo when the primary call already touched the |
| 1318 | * direct mapping: |
| 1319 | */ |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1320 | if (cpa->flags & CPA_PAGES_ARRAY) { |
| 1321 | struct page *page = cpa->pages[cpa->curpage]; |
| 1322 | if (unlikely(PageHighMem(page))) |
| 1323 | return 0; |
| 1324 | vaddr = (unsigned long)page_address(page); |
| 1325 | } else if (cpa->flags & CPA_ARRAY) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1326 | vaddr = cpa->vaddr[cpa->curpage]; |
| 1327 | else |
| 1328 | vaddr = *cpa->vaddr; |
| 1329 | |
| 1330 | if (!(within(vaddr, PAGE_OFFSET, |
Suresh Siddha | a1e4621 | 2009-01-20 14:20:21 -0800 | [diff] [blame] | 1331 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1332 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1333 | alias_cpa = *cpa; |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1334 | alias_cpa.vaddr = &laddr; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1335 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1336 | |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1337 | ret = __change_page_attr_set_clr(&alias_cpa, 0); |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1338 | if (ret) |
| 1339 | return ret; |
Thomas Gleixner | f34b439 | 2008-02-15 22:17:57 +0100 | [diff] [blame] | 1340 | } |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1341 | |
Arjan van de Ven | 488fd99 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1342 | #ifdef CONFIG_X86_64 |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1343 | /* |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1344 | * If the primary call didn't touch the high mapping already |
| 1345 | * and the physical address is inside the kernel map, we need |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1346 | * to touch the high mapped kernel as well: |
| 1347 | */ |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1348 | if (!within(vaddr, (unsigned long)_text, _brk_end) && |
Thomas Garnier | 4ff5308 | 2016-06-15 12:05:45 -0700 | [diff] [blame] | 1349 | within_inclusive(cpa->pfn, highmap_start_pfn(), |
| 1350 | highmap_end_pfn())) { |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1351 | unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + |
| 1352 | __START_KERNEL_map - phys_base; |
| 1353 | alias_cpa = *cpa; |
| 1354 | alias_cpa.vaddr = &temp_cpa_vaddr; |
| 1355 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1356 | |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1357 | /* |
| 1358 | * The high mapping range is imprecise, so ignore the |
| 1359 | * return value. |
| 1360 | */ |
| 1361 | __change_page_attr_set_clr(&alias_cpa, 0); |
| 1362 | } |
Thomas Gleixner | 0879750 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 1363 | #endif |
Tejun Heo | 992f4c1 | 2009-06-22 11:56:24 +0900 | [diff] [blame] | 1364 | |
| 1365 | return 0; |
Ingo Molnar | 44af6c4 | 2008-01-30 13:34:03 +0100 | [diff] [blame] | 1366 | } |
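| | /* |
| | * Example of the aliasing handled above (illustrative): a vmalloc or |
| | * module address passed to the set_memory_*() API is only one view |
| | * of the underlying page. The same pfn is typically also reachable |
| | * through the direct mapping at __va(), and for kernel text through |
| | * the 64-bit highmap; both aliases are updated here so no mapping is |
| | * left with stale attributes. |
| | */ |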
| 1367 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1368 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1369 | { |
Matt Fleming | e535ec0 | 2016-09-20 14:26:21 +0100 | [diff] [blame] | 1370 | unsigned long numpages = cpa->numpages; |
| 1371 | int ret; |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1372 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1373 | while (numpages) { |
| 1374 | /* |
| 1375 | * Store the remaining nr of pages for the large page |
| 1376 | * preservation check. |
| 1377 | */ |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1378 | cpa->numpages = numpages; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1379 | /* for array changes, we can't use large pages */ |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1380 | if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1381 | cpa->numpages = 1; |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1382 | |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1383 | if (!debug_pagealloc_enabled()) |
Suresh Siddha | ad5ca55 | 2008-09-23 14:00:42 -0700 | [diff] [blame] | 1384 | spin_lock(&cpa_lock); |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1385 | ret = __change_page_attr(cpa, checkalias); |
Christian Borntraeger | 288cf3c | 2016-03-15 14:57:33 -0700 | [diff] [blame] | 1386 | if (!debug_pagealloc_enabled()) |
Suresh Siddha | ad5ca55 | 2008-09-23 14:00:42 -0700 | [diff] [blame] | 1387 | spin_unlock(&cpa_lock); |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1388 | if (ret) |
| 1389 | return ret; |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1390 | |
Thomas Gleixner | c31c7d4 | 2008-02-18 20:54:14 +0100 | [diff] [blame] | 1391 | if (checkalias) { |
| 1392 | ret = cpa_process_alias(cpa); |
| 1393 | if (ret) |
| 1394 | return ret; |
| 1395 | } |
| 1396 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1397 | /* |
| 1398 | * Adjust the number of pages with the result of the |
| 1399 | * CPA operation. Either a large page has been |
| 1400 | * preserved or a single page update happened. |
| 1401 | */ |
Matt Fleming | 7425637 | 2016-01-29 11:36:10 +0000 | [diff] [blame] | 1402 | BUG_ON(cpa->numpages > numpages || !cpa->numpages); |
Rafael J. Wysocki | 9b5cf48 | 2008-03-03 01:17:37 +0100 | [diff] [blame] | 1403 | numpages -= cpa->numpages; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1404 | if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1405 | cpa->curpage++; |
| 1406 | else |
| 1407 | *cpa->vaddr += cpa->numpages * PAGE_SIZE; |
| 1408 | |
Thomas Gleixner | 65e074d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1409 | } |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1410 | return 0; |
| 1411 | } |
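| | /* |
| | * Illustrative pass through the loop above: a 2M-aligned, 512-page |
| | * request whose new protections are compatible with the covering |
| | * large page finishes in a single iteration (cpa->numpages comes |
| | * back as 512 from the preservation check); smaller or unaligned |
| | * requests advance by whatever each iteration managed, either a |
| | * preserved large page or a single 4K update. |
| | */ |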
| 1412 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1413 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1414 | pgprot_t mask_set, pgprot_t mask_clr, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1415 | int force_split, int in_flag, |
| 1416 | struct page **pages) |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1417 | { |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1418 | struct cpa_data cpa; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1419 | int ret, cache, checkalias; |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 1420 | unsigned long baddr = 0; |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1421 | |
Borislav Petkov | 82f0712 | 2013-10-31 17:25:07 +0100 | [diff] [blame] | 1422 | memset(&cpa, 0, sizeof(cpa)); |
| 1423 | |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1424 | /* |
| 1425 | * Check whether we are requested to change an unsupported |
| 1426 | * feature: |
| 1427 | */ |
| 1428 | mask_set = canon_pgprot(mask_set); |
| 1429 | mask_clr = canon_pgprot(mask_clr); |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1430 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1431 | return 0; |
| 1432 | |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 1433 | /* Ensure we are PAGE_SIZE aligned */ |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1434 | if (in_flag & CPA_ARRAY) { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1435 | int i; |
| 1436 | for (i = 0; i < numpages; i++) { |
| 1437 | if (addr[i] & ~PAGE_MASK) { |
| 1438 | addr[i] &= PAGE_MASK; |
| 1439 | WARN_ON_ONCE(1); |
| 1440 | } |
| 1441 | } |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1442 | } else if (!(in_flag & CPA_PAGES_ARRAY)) { |
| 1443 | /* |
| 1444 | * in_flag of CPA_PAGES_ARRAY implies it is aligned. |
| 1445 | * No need to check in that case. |
| 1446 | */ |
| 1447 | if (*addr & ~PAGE_MASK) { |
| 1448 | *addr &= PAGE_MASK; |
| 1449 | /* |
| 1450 | * People should not be passing in unaligned addresses: |
| 1451 | */ |
| 1452 | WARN_ON_ONCE(1); |
| 1453 | } |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 1454 | /* |
| 1455 | * Save address for cache flush. *addr is modified in the call |
| 1456 | * to __change_page_attr_set_clr() below. |
| 1457 | */ |
| 1458 | baddr = *addr; |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 1459 | } |
| 1460 | |
Nick Piggin | 5843d9a | 2008-08-01 03:15:21 +0200 | [diff] [blame] | 1461 | /* Must avoid aliasing mappings in the highmem code */ |
| 1462 | kmap_flush_unused(); |
| 1463 | |
Nick Piggin | db64fe0 | 2008-10-18 20:27:03 -0700 | [diff] [blame] | 1464 | vm_unmap_aliases(); |
| 1465 | |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1466 | cpa.vaddr = addr; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1467 | cpa.pages = pages; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1468 | cpa.numpages = numpages; |
| 1469 | cpa.mask_set = mask_set; |
| 1470 | cpa.mask_clr = mask_clr; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1471 | cpa.flags = 0; |
| 1472 | cpa.curpage = 0; |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1473 | cpa.force_split = force_split; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1474 | |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1475 | if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
| 1476 | cpa.flags |= in_flag; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1477 | |
Thomas Gleixner | af96e44 | 2008-02-15 21:49:46 +0100 | [diff] [blame] | 1478 | /* No alias checking for _NX bit modifications */ |
| 1479 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; |
| 1480 | |
| 1481 | ret = __change_page_attr_set_clr(&cpa, checkalias); |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1482 | |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1483 | /* |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1484 | * Check whether we really changed something: |
| 1485 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1486 | if (!(cpa.flags & CPA_FLUSHTLB)) |
Shaohua Li | 1ac2f7d | 2008-08-04 14:51:24 +0800 | [diff] [blame] | 1487 | goto out; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1488 | |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1489 | /* |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1490 | * No need to flush when we did not set any of the caching |
| 1491 | * attributes: |
| 1492 | */ |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1493 | cache = !!pgprot2cachemode(mask_set); |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1494 | |
| 1495 | /* |
Borislav Petkov | b82ad3d | 2014-03-12 15:13:04 +0100 | [diff] [blame] | 1496 | * On success we use CLFLUSH, when the CPU supports it, to |
| 1497 | * avoid the WBINVD. If the CPU does not support it, and in the |
H. Peter Anvin | f026cfa | 2012-08-14 09:53:38 -0700 | [diff] [blame] | 1498 | * error case we fall back to cpa_flush_all (which uses |
Borislav Petkov | b82ad3d | 2014-03-12 15:13:04 +0100 | [diff] [blame] | 1499 | * WBINVD): |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1500 | */ |
Borislav Petkov | 906bf7f | 2016-03-29 17:41:59 +0200 | [diff] [blame] | 1501 | if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) { |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1502 | if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { |
| 1503 | cpa_flush_array(addr, numpages, cache, |
| 1504 | cpa.flags, pages); |
| 1505 | } else |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 1506 | cpa_flush_range(baddr, numpages, cache); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1507 | } else |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 1508 | cpa_flush_all(cache); |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 1509 | |
Thomas Gleixner | 76ebd05 | 2008-02-09 23:24:09 +0100 | [diff] [blame] | 1510 | out: |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1511 | return ret; |
| 1512 | } |
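| | /* |
| | * Flush selection above, by example (illustrative): clearing only |
| | * _PAGE_RW leaves pgprot2cachemode(mask_set) at WB, so cache == 0 |
| | * and only the TLBs are flushed; setting a UC-/WC/WT caching |
| | * attribute makes cache != 0 and additionally flushes cache lines, |
| | * via CLFLUSH when available and WBINVD otherwise. |
| | */ |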
| 1513 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1514 | static inline int change_page_attr_set(unsigned long *addr, int numpages, |
| 1515 | pgprot_t mask, int array) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1516 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1517 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1518 | (array ? CPA_ARRAY : 0), NULL); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1519 | } |
| 1520 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1521 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, |
| 1522 | pgprot_t mask, int array) |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1523 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1524 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1525 | (array ? CPA_ARRAY : 0), NULL); |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1526 | } |
| 1527 | |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1528 | static inline int cpa_set_pages_array(struct page **pages, int numpages, |
| 1529 | pgprot_t mask) |
| 1530 | { |
| 1531 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, |
| 1532 | CPA_PAGES_ARRAY, pages); |
| 1533 | } |
| 1534 | |
| 1535 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, |
| 1536 | pgprot_t mask) |
| 1537 | { |
| 1538 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, |
| 1539 | CPA_PAGES_ARRAY, pages); |
| 1540 | } |
| 1541 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1542 | int _set_memory_uc(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1543 | { |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1544 | /* |
| 1545 | * For now UC MINUS; see the comments in ioremap_nocache(). |
Luis R. Rodriguez | e4b6be33 | 2015-05-11 10:15:53 +0200 | [diff] [blame] | 1546 | * If you really need strong UC use ioremap_uc(), but note |
| 1547 | * that you cannot override IO areas with set_memory_*() as |
| 1548 | * these helpers cannot work with IO memory. |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1549 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1550 | return change_page_attr_set(&addr, numpages, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1551 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
| 1552 | 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1553 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1554 | |
| 1555 | int set_memory_uc(unsigned long addr, int numpages) |
| 1556 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1557 | int ret; |
| 1558 | |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 1559 | /* |
| 1560 | * For now UC MINUS; see the comments in ioremap_nocache(). |
| 1561 | */ |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1562 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 1563 | _PAGE_CACHE_MODE_UC_MINUS, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1564 | if (ret) |
| 1565 | goto out_err; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1566 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1567 | ret = _set_memory_uc(addr, numpages); |
| 1568 | if (ret) |
| 1569 | goto out_free; |
| 1570 | |
| 1571 | return 0; |
| 1572 | |
| 1573 | out_free: |
| 1574 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
| 1575 | out_err: |
| 1576 | return ret; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1577 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1578 | EXPORT_SYMBOL(set_memory_uc); |
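| | /* |
| | * Typical usage (an illustrative sketch, error handling trimmed; buf |
| | * and nrpages are hypothetical): a driver that must access a kernel |
| | * buffer uncached pairs the calls and restores WB before freeing, so |
| | * the memtype reservation taken above is released: |
| | * |
| | *	ret = set_memory_uc((unsigned long)buf, nrpages); |
| | *	... use the buffer ... |
| | *	set_memory_wb((unsigned long)buf, nrpages); |
| | */ |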
| 1579 | |
H Hartley Sweeten | 2d070ef | 2011-11-15 14:49:00 -0800 | [diff] [blame] | 1580 | static int _set_memory_array(unsigned long *addr, int addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1581 | enum page_cache_mode new_type) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1582 | { |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1583 | enum page_cache_mode set_type; |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1584 | int i, j; |
| 1585 | int ret; |
| 1586 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1587 | for (i = 0; i < addrinarray; i++) { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1588 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1589 | new_type, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1590 | if (ret) |
| 1591 | goto out_free; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1592 | } |
| 1593 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1594 | /* If WC, set to UC- first and then WC */ |
| 1595 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? |
| 1596 | _PAGE_CACHE_MODE_UC_MINUS : new_type; |
| 1597 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1598 | ret = change_page_attr_set(addr, addrinarray, |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1599 | cachemode2pgprot(set_type), 1); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1600 | |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1601 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1602 | ret = change_page_attr_set_clr(addr, addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1603 | cachemode2pgprot( |
| 1604 | _PAGE_CACHE_MODE_WC), |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1605 | __pgprot(_PAGE_CACHE_MASK), |
| 1606 | 0, CPA_ARRAY, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1607 | if (ret) |
| 1608 | goto out_free; |
Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 1609 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1610 | return 0; |
| 1611 | |
| 1612 | out_free: |
| 1613 | for (j = 0; j < i; j++) |
| 1614 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); |
| 1615 | |
| 1616 | return ret; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1617 | } |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1618 | |
| 1619 | int set_memory_array_uc(unsigned long *addr, int addrinarray) |
| 1620 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1621 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1622 | } |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1623 | EXPORT_SYMBOL(set_memory_array_uc); |
| 1624 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1625 | int set_memory_array_wc(unsigned long *addr, int addrinarray) |
| 1626 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1627 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1628 | } |
| 1629 | EXPORT_SYMBOL(set_memory_array_wc); |
| 1630 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1631 | int set_memory_array_wt(unsigned long *addr, int addrinarray) |
| 1632 | { |
| 1633 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT); |
| 1634 | } |
| 1635 | EXPORT_SYMBOL_GPL(set_memory_array_wt); |
| 1636 | |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1637 | int _set_memory_wc(unsigned long addr, int numpages) |
| 1638 | { |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1639 | int ret; |
Pallipadi, Venkatesh | bdc6340 | 2009-07-30 14:43:19 -0700 | [diff] [blame] | 1640 | unsigned long addr_copy = addr; |
| 1641 | |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1642 | ret = change_page_attr_set(&addr, numpages, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1643 | cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS), |
| 1644 | 0); |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1645 | if (!ret) { |
Pallipadi, Venkatesh | bdc6340 | 2009-07-30 14:43:19 -0700 | [diff] [blame] | 1646 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1647 | cachemode2pgprot( |
| 1648 | _PAGE_CACHE_MODE_WC), |
Pallipadi, Venkatesh | bdc6340 | 2009-07-30 14:43:19 -0700 | [diff] [blame] | 1649 | __pgprot(_PAGE_CACHE_MASK), |
| 1650 | 0, 0, NULL); |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1651 | } |
| 1652 | return ret; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1653 | } |
| 1654 | |
| 1655 | int set_memory_wc(unsigned long addr, int numpages) |
| 1656 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1657 | int ret; |
| 1658 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1659 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
Juergen Gross | e00c8cc | 2014-11-03 14:01:59 +0100 | [diff] [blame] | 1660 | _PAGE_CACHE_MODE_WC, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1661 | if (ret) |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1662 | return ret; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1663 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1664 | ret = _set_memory_wc(addr, numpages); |
| 1665 | if (ret) |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1666 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1667 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1668 | return ret; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1669 | } |
| 1670 | EXPORT_SYMBOL(set_memory_wc); |
| 1671 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1672 | int _set_memory_wt(unsigned long addr, int numpages) |
| 1673 | { |
| 1674 | return change_page_attr_set(&addr, numpages, |
| 1675 | cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0); |
| 1676 | } |
| 1677 | |
| 1678 | int set_memory_wt(unsigned long addr, int numpages) |
| 1679 | { |
| 1680 | int ret; |
| 1681 | |
| 1682 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
| 1683 | _PAGE_CACHE_MODE_WT, NULL); |
| 1684 | if (ret) |
| 1685 | return ret; |
| 1686 | |
| 1687 | ret = _set_memory_wt(addr, numpages); |
| 1688 | if (ret) |
| 1689 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
| 1690 | |
| 1691 | return ret; |
| 1692 | } |
| 1693 | EXPORT_SYMBOL_GPL(set_memory_wt); |
| 1694 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1695 | int _set_memory_wb(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1696 | { |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1697 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1698 | return change_page_attr_clear(&addr, numpages, |
| 1699 | __pgprot(_PAGE_CACHE_MASK), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1700 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1701 | |
| 1702 | int set_memory_wb(unsigned long addr, int numpages) |
| 1703 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1704 | int ret; |
| 1705 | |
| 1706 | ret = _set_memory_wb(addr, numpages); |
| 1707 | if (ret) |
| 1708 | return ret; |
| 1709 | |
venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 1710 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1711 | return 0; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1712 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1713 | EXPORT_SYMBOL(set_memory_wb); |
| 1714 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1715 | int set_memory_array_wb(unsigned long *addr, int addrinarray) |
| 1716 | { |
| 1717 | int i; |
venkatesh.pallipadi@intel.com | a5593e0 | 2009-04-09 14:26:48 -0700 | [diff] [blame] | 1718 | int ret; |
| 1719 | |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1720 | /* WB cache mode is hard wired to all cache attribute bits being 0 */ |
venkatesh.pallipadi@intel.com | a5593e0 | 2009-04-09 14:26:48 -0700 | [diff] [blame] | 1721 | ret = change_page_attr_clear(addr, addrinarray, |
| 1722 | __pgprot(_PAGE_CACHE_MASK), 1); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1723 | if (ret) |
| 1724 | return ret; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1725 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1726 | for (i = 0; i < addrinarray; i++) |
| 1727 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); |
Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 1728 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1729 | return 0; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1730 | } |
| 1731 | EXPORT_SYMBOL(set_memory_array_wb); |
| 1732 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1733 | int set_memory_x(unsigned long addr, int numpages) |
| 1734 | { |
H. Peter Anvin | 583140a | 2009-11-13 15:28:15 -0800 | [diff] [blame] | 1735 | if (!(__supported_pte_mask & _PAGE_NX)) |
| 1736 | return 0; |
| 1737 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1738 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1739 | } |
| 1740 | EXPORT_SYMBOL(set_memory_x); |
| 1741 | |
| 1742 | int set_memory_nx(unsigned long addr, int numpages) |
| 1743 | { |
H. Peter Anvin | 583140a | 2009-11-13 15:28:15 -0800 | [diff] [blame] | 1744 | if (!(__supported_pte_mask & _PAGE_NX)) |
| 1745 | return 0; |
| 1746 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1747 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1748 | } |
| 1749 | EXPORT_SYMBOL(set_memory_nx); |
| 1750 | |
| 1751 | int set_memory_ro(unsigned long addr, int numpages) |
| 1752 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1753 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1754 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1755 | |
| 1756 | int set_memory_rw(unsigned long addr, int numpages) |
| 1757 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1758 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1759 | } |
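| | /* |
| | * Illustrative pairing (not from the original source): code that must |
| | * patch an otherwise read-only page typically brackets the write: |
| | * |
| | *	set_memory_rw(addr, 1); |
| | *	... modify the page ... |
| | *	set_memory_ro(addr, 1); |
| | */ |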
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1760 | |
| 1761 | int set_memory_np(unsigned long addr, int numpages) |
| 1762 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1763 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1764 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1765 | |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1766 | int set_memory_4k(unsigned long addr, int numpages) |
| 1767 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1768 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1769 | __pgprot(0), 1, 0, NULL); |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1770 | } |
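| | /* |
| | * Note: both masks above are empty, so no protection bits change; |
| | * the force_split flag alone makes any large mapping covering the |
| | * range be split down to 4K pages. |
| | */ |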
| 1771 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1772 | int set_pages_uc(struct page *page, int numpages) |
| 1773 | { |
| 1774 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1775 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1776 | return set_memory_uc(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1777 | } |
| 1778 | EXPORT_SYMBOL(set_pages_uc); |
| 1779 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1780 | static int _set_pages_array(struct page **pages, int addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1781 | enum page_cache_mode new_type) |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1782 | { |
| 1783 | unsigned long start; |
| 1784 | unsigned long end; |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1785 | enum page_cache_mode set_type; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1786 | int i; |
| 1787 | int free_idx; |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1788 | int ret; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1789 | |
| 1790 | for (i = 0; i < addrinarray; i++) { |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1791 | if (PageHighMem(pages[i])) |
| 1792 | continue; |
| 1793 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1794 | end = start + PAGE_SIZE; |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1795 | if (reserve_memtype(start, end, new_type, NULL)) |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1796 | goto err_out; |
| 1797 | } |
| 1798 | |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1799 | /* If WC, set to UC- first and then WC */ |
| 1800 | set_type = (new_type == _PAGE_CACHE_MODE_WC) ? |
| 1801 | _PAGE_CACHE_MODE_UC_MINUS : new_type; |
| 1802 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1803 | ret = cpa_set_pages_array(pages, addrinarray, |
Toshi Kani | 623dffb | 2015-06-04 18:55:20 +0200 | [diff] [blame] | 1804 | cachemode2pgprot(set_type)); |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1805 | if (!ret && new_type == _PAGE_CACHE_MODE_WC) |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1806 | ret = change_page_attr_set_clr(NULL, addrinarray, |
Juergen Gross | c06814d | 2014-11-03 14:01:57 +0100 | [diff] [blame] | 1807 | cachemode2pgprot( |
| 1808 | _PAGE_CACHE_MODE_WC), |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1809 | __pgprot(_PAGE_CACHE_MASK), |
| 1810 | 0, CPA_PAGES_ARRAY, pages); |
| 1811 | if (ret) |
| 1812 | goto err_out; |
| 1813 | return 0; /* Success */ |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1814 | err_out: |
| 1815 | free_idx = i; |
| 1816 | for (i = 0; i < free_idx; i++) { |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1817 | if (PageHighMem(pages[i])) |
| 1818 | continue; |
| 1819 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1820 | end = start + PAGE_SIZE; |
| 1821 | free_memtype(start, end); |
| 1822 | } |
| 1823 | return -EINVAL; |
| 1824 | } |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1825 | |
int set_pages_array_uc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wt(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_pages_array_wt);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

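/*
 * Switch the pages back to write-back and release the memtype
 * reservations taken when they were made UC-/WC/WT. A typical usage
 * pattern (sketch only) brackets the device access:
 *
 *	ret = set_pages_array_wc(pages, nr);
 *	if (ret)
 *		return ret;
 *	... let the device stream into the pages ...
 *	set_pages_array_wb(pages, nr);
 */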
int set_pages_array_wb(struct page **pages, int addrinarray)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	/* WB cache mode is hard wired to all cache attribute bits being 0 */
	retval = cpa_clear_pages_array(pages, addrinarray,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < addrinarray; i++) {
		if (PageHighMem(pages[i]))
			continue;
		start = page_to_pfn(pages[i]) << PAGE_SHIFT;
		end = start + PAGE_SIZE;
		free_memtype(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

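/*
 * The two helpers below set/clear _PAGE_PRESENT and _PAGE_RW on the
 * kernel's linear mapping of a range of pages, so that stray accesses
 * to freed pages fault immediately instead of corrupting memory.
 */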
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking is needed for setting the present flag;
	 * otherwise we might have to break large pages for the 64-bit
	 * kernel text mappings (which adds complexity, especially if we
	 * want to do this from atomic context). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.pgd = NULL,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking is needed for clearing the present flag;
	 * otherwise we might have to break large pages for the 64-bit
	 * kernel text mappings (which adds complexity, especially if we
	 * want to do this from atomic context). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

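/*
 * Called by the page allocator to unmap pages from the kernel linear
 * mapping when they are freed (@enable == 0) and to map them again
 * when they are allocated (@enable != 0).
 */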
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are not used for the identity mappings at boot
	 * time, hence no memory allocation is needed to split a large
	 * page here.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should send an IPI and flush all TLBs, but that can
	 * deadlock in this context, so flush only the current CPU's TLB:
	 */
	__flush_tlb_all();

	arch_flush_lazy_mmu_mode();
}

#ifdef CONFIG_HIBERNATION

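/*
 * Used by the hibernation code to check whether a page can be safely
 * read through the kernel linear mapping while the snapshot image is
 * being created.
 */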
bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

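/*
 * Map @numpages pages starting at @pfn to virtual address @address in
 * the page table hierarchy rooted at @pgd rather than in the kernel's
 * own page tables (e.g. for the EFI runtime page table). Protection
 * bits that are absent from @page_flags (_PAGE_NX, _PAGE_RW) are
 * cleared from the new mapping.
 */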
int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
			    unsigned numpages, unsigned long page_flags)
{
	int retval = -EINVAL;

	struct cpa_data cpa = {
		.vaddr = &address,
		.pfn = pfn,
		.pgd = pgd,
		.numpages = numpages,
		.mask_set = __pgprot(0),
		.mask_clr = __pgprot(0),
		.flags = 0,
	};

	if (!(__supported_pte_mask & _PAGE_NX))
		goto out;

	/*
	 * Accumulate the bits to clear: a plain assignment would let the
	 * _PAGE_RW check overwrite the mask set by the _PAGE_NX check.
	 */
	if (!(page_flags & _PAGE_NX))
		pgprot_val(cpa.mask_clr) |= _PAGE_NX;

	if (!(page_flags & _PAGE_RW))
		pgprot_val(cpa.mask_clr) |= _PAGE_RW;

	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

	retval = __change_page_attr_set_clr(&cpa, 0);
	__flush_tlb_all();

out:
	return retval;
}

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif