/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
 * with cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change a page attribute in parallel with another CPU splitting
 * a large page entry while changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

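/*
 * Bits for cpa_data::flags: request a TLB flush after the change, or mark
 * cpa_data::vaddr / cpa_data::pages as arrays of addresses or pages.
 */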
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        unsigned long flags;

        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
}

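/*
 * Splitting one large page yields PTRS_PER_PTE entries at the next smaller
 * level; adjust the direct-mapping counters accordingly.
 */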
static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                   direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                   direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M: %8lu kB\n",
                   direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G: %8lu kB\n",
                           direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

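/*
 * The pfn range of the kernel's high mapping: from _text up to the end of
 * the brk area, rounded up to a PMD boundary.
 */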
static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

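/* Check whether addr lies in the half-open interval [start, end). */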
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size:  number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

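/*
 * Runs on every CPU: flush the whole TLB and, if requested via @arg, also
 * write back and invalidate the caches.
 */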
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

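/*
 * Flush the TLB on all CPUs and, if caching attributes changed, clflush
 * every present page in the affected virtual range.
 */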
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

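/*
 * Array variant of the range flush above: flush the TLB everywhere and then
 * either wbinvd (for requests of 1024 pages or more) or clflush each present
 * page named by the address or page array.
 */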
static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);
        pgprot_t required = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
#ifdef CONFIG_PCI_BIOS
        if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;
#endif

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext since that is gone later on. On
         * 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
        /*
         * .data and .bss should always be writable.
         */
        if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
            within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
                pgprot_val(required) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
         * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
         * the kernel text mappings for the large-page-aligned text and rodata
         * sections will always be read-only. The kernel identity mappings
         * covering the holes caused by this alignment can be anything the
         * caller asks for.
         *
         * This will preserve the large page mappings for kernel text/data
         * at no extra cost.
         */
        if (kernel_set_to_readonly &&
            within(address, (unsigned long)_text,
                   (unsigned long)__end_rodata_hpage_align)) {
                unsigned int level;

                /*
                 * Don't enforce the !RW mapping for the kernel text mapping,
                 * if the current mapping is already using small page mapping.
                 * No need to work hard to preserve large page mappings in this
                 * case.
                 *
                 * This also fixes the Linux Xen paravirt guest boot failure
                 * (because of unexpected read-only mappings for kernel identity
                 * mappings). In this paravirt guest case, the kernel text
                 * mapping and the kernel identity mapping share the same
                 * page-table pages. Thus we can't really use different
                 * protections for the kernel text and identity mappings. Also,
                 * these shared mappings are made of small page mappings.
                 * Thus this "don't enforce !RW for small page kernel text
                 * mappings" logic also helps the Linux Xen paravirt guest
                 * boot.
                 */
                if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
                        pgprot_val(forbidden) |= _PAGE_RW;
        }
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
        prot = __pgprot(pgprot_val(prot) | pgprot_val(required));

        return prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

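/*
 * Check whether the requested attribute change can be applied to the large
 * page mapping @address without splitting it. Returns 1 when the page must
 * be split, 0 when it could be preserved (and possibly updated in place),
 * or a negative error code.
 */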
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = req_prot = pte_pgprot(old_pte);

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(req_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protection() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
        pfn = pte_pfn(old_pte);
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. maxpages has been updated
         * above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

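/*
 * Split the large page mapping @address into PTRS_PER_PTE entries at the
 * next smaller page size, preserving the original protections, and install
 * the new page table via __set_pmd_pte().
 */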
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
         * If we ever want to utilize the PAT bit, we need to
         * update this function to make sure it's converted from
         * bit 12 to bit 7 when we cross from the 2MB level to
         * the 4K level:
         */
        WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
            address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);

#ifdef CONFIG_X86_64
        if (address >= (unsigned long)__va(1UL<<32) &&
            address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);
#endif

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();

        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base)
                __free_page(base);
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

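/*
 * Called when lookup_address() finds no PTE (or a zero PTE) for the address
 * being changed.
 */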
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

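/*
 * Change the attributes of the single (small or large) page that maps the
 * current address in @cpa; non-present addresses are handled by
 * __cpa_process_fault().
 */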
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                address = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and the CPA_FLUSHTLB flag have been
         * updated in try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, that says
                 * "The TLBs may contain both ordinary and large-page
                 *  translations for a 4-KByte range of linear addresses. This
                 *  may occur if software modifies the paging structures so that
                 *  the page size used for the address range changes. If the two
                 *  translations differ with respect to page frame or attributes
                 *  (e.g., permissions), processor behavior is undefined and may
                 *  be implementation-specific."
                 *
                 * We do this global tlb flush inside the cpa_lock, so that we
                 * don't allow any other cpu, with stale tlb entries, to change
                 * the page attribute in parallel for an address that falls
                 * into the just split large page entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

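/*
 * The same physical page can be reachable through more than one virtual
 * mapping. Propagate the attribute change to the kernel direct mapping and,
 * on 64-bit, to the high kernel mapping of the affected pfn.
 */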
static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (cpa->pfn >= max_pfn_mapped)
                return 0;

#ifdef CONFIG_X86_64
        if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
                return 0;
#endif
        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

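/*
 * Walk the whole request: change attributes page by page (or large page by
 * large page), processing the alias mappings when @checkalias is set.
 */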
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;

        }
        return 0;
}

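/* Does @attr touch any of the caching attribute bits (PAT/PWT/PCD)? */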
static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

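/*
 * Common worker: sanitize the masks, page-align the request, run
 * __change_page_attr_set_clr() and then do the required TLB/cache flushing.
 */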
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 850 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 851 | pgprot_t mask_set, pgprot_t mask_clr, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 852 | int force_split, int in_flag, |
| 853 | struct page **pages) |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 854 | { |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 855 | struct cpa_data cpa; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 856 | int ret, cache, checkalias; |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 857 | unsigned long baddr = 0; |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 858 | |
| 859 | /* |
| 860 | * Check, if we are requested to change a not supported |
| 861 | * feature: |
| 862 | */ |
| 863 | mask_set = canon_pgprot(mask_set); |
| 864 | mask_clr = canon_pgprot(mask_clr); |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 865 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) |
Thomas Gleixner | 331e406 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 866 | return 0; |
| 867 | |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 868 | /* Ensure we are PAGE_SIZE aligned */ |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 869 | if (in_flag & CPA_ARRAY) { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 870 | int i; |
| 871 | for (i = 0; i < numpages; i++) { |
| 872 | if (addr[i] & ~PAGE_MASK) { |
| 873 | addr[i] &= PAGE_MASK; |
| 874 | WARN_ON_ONCE(1); |
| 875 | } |
| 876 | } |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 877 | } else if (!(in_flag & CPA_PAGES_ARRAY)) { |
| 878 | /* |
| 879 | * in_flag of CPA_PAGES_ARRAY implies it is aligned. |
 | 880 | * No need to check in that case |
| 881 | */ |
| 882 | if (*addr & ~PAGE_MASK) { |
| 883 | *addr &= PAGE_MASK; |
| 884 | /* |
| 885 | * People should not be passing in unaligned addresses: |
| 886 | */ |
| 887 | WARN_ON_ONCE(1); |
| 888 | } |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 889 | /* |
| 890 | * Save address for cache flush. *addr is modified in the call |
| 891 | * to __change_page_attr_set_clr() below. |
| 892 | */ |
| 893 | baddr = *addr; |
Thomas Gleixner | 69b1415 | 2008-02-13 11:04:50 +0100 | [diff] [blame] | 894 | } |
| 895 | |
Nick Piggin | 5843d9a | 2008-08-01 03:15:21 +0200 | [diff] [blame] | 896 | /* Must avoid aliasing mappings in the highmem code */ |
| 897 | kmap_flush_unused(); |
| 898 | |
Nick Piggin | db64fe0 | 2008-10-18 20:27:03 -0700 | [diff] [blame] | 899 | vm_unmap_aliases(); |
| 900 | |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 901 | cpa.vaddr = addr; |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 902 | cpa.pages = pages; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 903 | cpa.numpages = numpages; |
| 904 | cpa.mask_set = mask_set; |
| 905 | cpa.mask_clr = mask_clr; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 906 | cpa.flags = 0; |
| 907 | cpa.curpage = 0; |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 908 | cpa.force_split = force_split; |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 909 | |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 910 | if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) |
| 911 | cpa.flags |= in_flag; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 912 | |
Thomas Gleixner | af96e44 | 2008-02-15 21:49:46 +0100 | [diff] [blame] | 913 | /* No alias checking for _NX bit modifications */ |
| 914 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; |
| 915 | |
| 916 | ret = __change_page_attr_set_clr(&cpa, checkalias); |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 917 | |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 918 | /* |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 919 | * Check whether we really changed something: |
| 920 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 921 | if (!(cpa.flags & CPA_FLUSHTLB)) |
Shaohua Li | 1ac2f7d | 2008-08-04 14:51:24 +0800 | [diff] [blame] | 922 | goto out; |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 923 | |
Thomas Gleixner | f4ae5da | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 924 | /* |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 925 | * No need to flush when we did not set any of the caching |
| 926 | * attributes: |
| 927 | */ |
| 928 | cache = cache_attr(mask_set); |
| 929 | |
| 930 | /* |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 931 | * On success we use clflush, when the CPU supports it, to |
 | 932 | * avoid the wbinvd. If the CPU does not support clflush, and in |
Thomas Gleixner | af1e684 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 933 | * the error case, we fall back to cpa_flush_all (which uses |
Thomas Gleixner | 57a6a46 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 934 | * wbinvd): |
| 935 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 936 | if (!ret && cpu_has_clflush) { |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 937 | if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { |
| 938 | cpa_flush_array(addr, numpages, cache, |
| 939 | cpa.flags, pages); |
| 940 | } else |
Jack Steiner | fa526d0 | 2009-09-03 12:56:02 -0500 | [diff] [blame] | 941 | cpa_flush_range(baddr, numpages, cache); |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 942 | } else |
Andi Kleen | 6bb8383 | 2008-02-04 16:48:06 +0100 | [diff] [blame] | 943 | cpa_flush_all(cache); |
Ingo Molnar | cacf890 | 2008-08-21 13:46:33 +0200 | [diff] [blame] | 944 | |
Thomas Gleixner | 76ebd05 | 2008-02-09 23:24:09 +0100 | [diff] [blame] | 945 | out: |
Thomas Gleixner | ff31452 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 946 | return ret; |
| 947 | } |
| 948 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 949 | static inline int change_page_attr_set(unsigned long *addr, int numpages, |
| 950 | pgprot_t mask, int array) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 951 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 952 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 953 | (array ? CPA_ARRAY : 0), NULL); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 954 | } |
| 955 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 956 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, |
| 957 | pgprot_t mask, int array) |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 958 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 959 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 960 | (array ? CPA_ARRAY : 0), NULL); |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 961 | } |
| 962 | |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 963 | static inline int cpa_set_pages_array(struct page **pages, int numpages, |
| 964 | pgprot_t mask) |
| 965 | { |
| 966 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, |
| 967 | CPA_PAGES_ARRAY, pages); |
| 968 | } |
| 969 | |
| 970 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, |
| 971 | pgprot_t mask) |
| 972 | { |
| 973 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, |
| 974 | CPA_PAGES_ARRAY, pages); |
| 975 | } |
| 976 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 977 | int _set_memory_uc(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 978 | { |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 979 | /* |
 | 980 | * For now UC MINUS. See comments in ioremap_nocache(). |
| 981 | */ |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 982 | return change_page_attr_set(&addr, numpages, |
| 983 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 984 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 985 | |
| 986 | int set_memory_uc(unsigned long addr, int numpages) |
| 987 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 988 | int ret; |
| 989 | |
Suresh Siddha | de33c44 | 2008-04-25 17:07:22 -0700 | [diff] [blame] | 990 | /* |
 | 991 | * For now UC MINUS. See comments in ioremap_nocache(). |
| 992 | */ |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 993 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
| 994 | _PAGE_CACHE_UC_MINUS, NULL); |
| 995 | if (ret) |
| 996 | goto out_err; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 997 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 998 | ret = _set_memory_uc(addr, numpages); |
| 999 | if (ret) |
| 1000 | goto out_free; |
| 1001 | |
| 1002 | return 0; |
| 1003 | |
| 1004 | out_free: |
| 1005 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
| 1006 | out_err: |
| 1007 | return ret; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1008 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1009 | EXPORT_SYMBOL(set_memory_uc); |
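/*
 * Illustrative usage sketch (editor's addition, not part of pageattr.c):
 * a caller that needs an uncached buffer pairs set_memory_uc() with
 * set_memory_wb() before the pages are freed, so the memtype reservation
 * taken above is released again. The example_* names are hypothetical.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void *example_alloc_uncached(int numpages)
{
	unsigned long addr;

	addr = __get_free_pages(GFP_KERNEL, get_order(numpages * PAGE_SIZE));
	if (!addr)
		return NULL;

	/* Reserve the memtype and switch the PTEs to UC_MINUS. */
	if (set_memory_uc(addr, numpages)) {
		free_pages(addr, get_order(numpages * PAGE_SIZE));
		return NULL;
	}
	return (void *)addr;
}

static void example_free_uncached(void *buf, int numpages)
{
	unsigned long addr = (unsigned long)buf;

	/* Restore write-back caching and drop the memtype reservation. */
	set_memory_wb(addr, numpages);
	free_pages(addr, get_order(numpages * PAGE_SIZE));
}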
| 1010 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1011 | int _set_memory_array(unsigned long *addr, int addrinarray, |
| 1012 | unsigned long new_type) |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1013 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1014 | int i, j; |
| 1015 | int ret; |
| 1016 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1017 | /* |
 | 1018 | * For now UC MINUS. See comments in ioremap_nocache(). |
| 1019 | */ |
| 1020 | for (i = 0; i < addrinarray; i++) { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1021 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1022 | new_type, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1023 | if (ret) |
| 1024 | goto out_free; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1025 | } |
| 1026 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1027 | ret = change_page_attr_set(addr, addrinarray, |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1028 | __pgprot(_PAGE_CACHE_UC_MINUS), 1); |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1029 | |
| 1030 | if (!ret && new_type == _PAGE_CACHE_WC) |
| 1031 | ret = change_page_attr_set_clr(addr, addrinarray, |
| 1032 | __pgprot(_PAGE_CACHE_WC), |
| 1033 | __pgprot(_PAGE_CACHE_MASK), |
| 1034 | 0, CPA_ARRAY, NULL); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1035 | if (ret) |
| 1036 | goto out_free; |
Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 1037 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1038 | return 0; |
| 1039 | |
| 1040 | out_free: |
| 1041 | for (j = 0; j < i; j++) |
| 1042 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); |
| 1043 | |
| 1044 | return ret; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1045 | } |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1046 | |
| 1047 | int set_memory_array_uc(unsigned long *addr, int addrinarray) |
| 1048 | { |
| 1049 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); |
| 1050 | } |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1051 | EXPORT_SYMBOL(set_memory_array_uc); |
| 1052 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1053 | int set_memory_array_wc(unsigned long *addr, int addrinarray) |
| 1054 | { |
| 1055 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); |
| 1056 | } |
| 1057 | EXPORT_SYMBOL(set_memory_array_wc); |
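/*
 * Illustrative sketch (editor's addition, not part of pageattr.c): the
 * *_array variants take an array of kernel virtual addresses so that the
 * memtype reservations and the CPA walk are batched instead of issued one
 * page at a time. example_with_wc_pages() and its callback are hypothetical.
 */
static int example_with_wc_pages(unsigned long *addrs, int count,
				 void (*use)(unsigned long *, int))
{
	int ret;

	ret = set_memory_array_wc(addrs, count);	/* reserve + switch to WC */
	if (ret)
		return ret;

	use(addrs, count);				/* touch the write-combined pages */

	return set_memory_array_wb(addrs, count);	/* undo before the pages are freed */
}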
| 1058 | |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1059 | int _set_memory_wc(unsigned long addr, int numpages) |
| 1060 | { |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1061 | int ret; |
Pallipadi, Venkatesh | bdc6340 | 2009-07-30 14:43:19 -0700 | [diff] [blame] | 1062 | unsigned long addr_copy = addr; |
| 1063 | |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1064 | ret = change_page_attr_set(&addr, numpages, |
| 1065 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1066 | if (!ret) { |
Pallipadi, Venkatesh | bdc6340 | 2009-07-30 14:43:19 -0700 | [diff] [blame] | 1067 | ret = change_page_attr_set_clr(&addr_copy, numpages, |
| 1068 | __pgprot(_PAGE_CACHE_WC), |
| 1069 | __pgprot(_PAGE_CACHE_MASK), |
| 1070 | 0, 0, NULL); |
venkatesh.pallipadi@intel.com | 3869c4a | 2009-04-09 14:26:50 -0700 | [diff] [blame] | 1071 | } |
| 1072 | return ret; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1073 | } |
| 1074 | |
| 1075 | int set_memory_wc(unsigned long addr, int numpages) |
| 1076 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1077 | int ret; |
| 1078 | |
Andreas Herrmann | 499f8f8 | 2008-06-10 16:06:21 +0200 | [diff] [blame] | 1079 | if (!pat_enabled) |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1080 | return set_memory_uc(addr, numpages); |
| 1081 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1082 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, |
| 1083 | _PAGE_CACHE_WC, NULL); |
| 1084 | if (ret) |
| 1085 | goto out_err; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1086 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1087 | ret = _set_memory_wc(addr, numpages); |
| 1088 | if (ret) |
| 1089 | goto out_free; |
| 1090 | |
| 1091 | return 0; |
| 1092 | |
| 1093 | out_free: |
| 1094 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
| 1095 | out_err: |
| 1096 | return ret; |
venkatesh.pallipadi@intel.com | ef354af | 2008-03-18 17:00:23 -0700 | [diff] [blame] | 1097 | } |
| 1098 | EXPORT_SYMBOL(set_memory_wc); |
| 1099 | |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1100 | int _set_memory_wb(unsigned long addr, int numpages) |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1101 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1102 | return change_page_attr_clear(&addr, numpages, |
| 1103 | __pgprot(_PAGE_CACHE_MASK), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1104 | } |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1105 | |
| 1106 | int set_memory_wb(unsigned long addr, int numpages) |
| 1107 | { |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1108 | int ret; |
| 1109 | |
| 1110 | ret = _set_memory_wb(addr, numpages); |
| 1111 | if (ret) |
| 1112 | return ret; |
| 1113 | |
venkatesh.pallipadi@intel.com | c15238d | 2008-08-20 16:45:51 -0700 | [diff] [blame] | 1114 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1115 | return 0; |
venkatesh.pallipadi@intel.com | 1219333 | 2008-03-18 17:00:18 -0700 | [diff] [blame] | 1116 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1117 | EXPORT_SYMBOL(set_memory_wb); |
| 1118 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1119 | int set_memory_array_wb(unsigned long *addr, int addrinarray) |
| 1120 | { |
| 1121 | int i; |
venkatesh.pallipadi@intel.com | a5593e0 | 2009-04-09 14:26:48 -0700 | [diff] [blame] | 1122 | int ret; |
| 1123 | |
| 1124 | ret = change_page_attr_clear(addr, addrinarray, |
| 1125 | __pgprot(_PAGE_CACHE_MASK), 1); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1126 | if (ret) |
| 1127 | return ret; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1128 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1129 | for (i = 0; i < addrinarray; i++) |
| 1130 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); |
Rene Herman | c5e147c | 2008-08-22 01:02:20 +0200 | [diff] [blame] | 1131 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1132 | return 0; |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1133 | } |
| 1134 | EXPORT_SYMBOL(set_memory_array_wb); |
| 1135 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1136 | int set_memory_x(unsigned long addr, int numpages) |
| 1137 | { |
H. Peter Anvin | 583140a | 2009-11-13 15:28:15 -0800 | [diff] [blame] | 1138 | if (!(__supported_pte_mask & _PAGE_NX)) |
| 1139 | return 0; |
| 1140 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1141 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1142 | } |
| 1143 | EXPORT_SYMBOL(set_memory_x); |
| 1144 | |
| 1145 | int set_memory_nx(unsigned long addr, int numpages) |
| 1146 | { |
H. Peter Anvin | 583140a | 2009-11-13 15:28:15 -0800 | [diff] [blame] | 1147 | if (!(__supported_pte_mask & _PAGE_NX)) |
| 1148 | return 0; |
| 1149 | |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1150 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1151 | } |
| 1152 | EXPORT_SYMBOL(set_memory_nx); |
| 1153 | |
| 1154 | int set_memory_ro(unsigned long addr, int numpages) |
| 1155 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1156 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1157 | } |
Bruce Allan | a03352d | 2008-09-29 20:19:22 -0700 | [diff] [blame] | 1158 | EXPORT_SYMBOL_GPL(set_memory_ro); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1159 | |
| 1160 | int set_memory_rw(unsigned long addr, int numpages) |
| 1161 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1162 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1163 | } |
Bruce Allan | a03352d | 2008-09-29 20:19:22 -0700 | [diff] [blame] | 1164 | EXPORT_SYMBOL_GPL(set_memory_rw); |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1165 | |
| 1166 | int set_memory_np(unsigned long addr, int numpages) |
| 1167 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1168 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1169 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1170 | |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1171 | int set_memory_4k(unsigned long addr, int numpages) |
| 1172 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1173 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), |
venkatesh.pallipadi@intel.com | 9ae2847 | 2009-03-19 14:51:14 -0700 | [diff] [blame] | 1174 | __pgprot(0), 1, 0, NULL); |
Andi Kleen | c9caa02 | 2008-03-12 03:53:29 +0100 | [diff] [blame] | 1175 | } |
| 1176 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1177 | int set_pages_uc(struct page *page, int numpages) |
| 1178 | { |
| 1179 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1180 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1181 | return set_memory_uc(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1182 | } |
| 1183 | EXPORT_SYMBOL(set_pages_uc); |
| 1184 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1185 | static int _set_pages_array(struct page **pages, int addrinarray, |
| 1186 | unsigned long new_type) |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1187 | { |
| 1188 | unsigned long start; |
| 1189 | unsigned long end; |
| 1190 | int i; |
| 1191 | int free_idx; |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1192 | int ret; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1193 | |
| 1194 | for (i = 0; i < addrinarray; i++) { |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1195 | if (PageHighMem(pages[i])) |
| 1196 | continue; |
| 1197 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1198 | end = start + PAGE_SIZE; |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1199 | if (reserve_memtype(start, end, new_type, NULL)) |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1200 | goto err_out; |
| 1201 | } |
| 1202 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1203 | ret = cpa_set_pages_array(pages, addrinarray, |
| 1204 | __pgprot(_PAGE_CACHE_UC_MINUS)); |
| 1205 | if (!ret && new_type == _PAGE_CACHE_WC) |
| 1206 | ret = change_page_attr_set_clr(NULL, addrinarray, |
| 1207 | __pgprot(_PAGE_CACHE_WC), |
| 1208 | __pgprot(_PAGE_CACHE_MASK), |
| 1209 | 0, CPA_PAGES_ARRAY, pages); |
| 1210 | if (ret) |
| 1211 | goto err_out; |
| 1212 | return 0; /* Success */ |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1213 | err_out: |
| 1214 | free_idx = i; |
| 1215 | for (i = 0; i < free_idx; i++) { |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1216 | if (PageHighMem(pages[i])) |
| 1217 | continue; |
| 1218 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1219 | end = start + PAGE_SIZE; |
| 1220 | free_memtype(start, end); |
| 1221 | } |
| 1222 | return -EINVAL; |
| 1223 | } |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1224 | |
| 1225 | int set_pages_array_uc(struct page **pages, int addrinarray) |
| 1226 | { |
| 1227 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); |
| 1228 | } |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1229 | EXPORT_SYMBOL(set_pages_array_uc); |
| 1230 | |
Pauli Nieminen | 4f64625 | 2010-04-01 12:45:01 +0000 | [diff] [blame] | 1231 | int set_pages_array_wc(struct page **pages, int addrinarray) |
| 1232 | { |
| 1233 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); |
| 1234 | } |
| 1235 | EXPORT_SYMBOL(set_pages_array_wc); |
| 1236 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1237 | int set_pages_wb(struct page *page, int numpages) |
| 1238 | { |
| 1239 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1240 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1241 | return set_memory_wb(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1242 | } |
| 1243 | EXPORT_SYMBOL(set_pages_wb); |
| 1244 | |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1245 | int set_pages_array_wb(struct page **pages, int addrinarray) |
| 1246 | { |
| 1247 | int retval; |
| 1248 | unsigned long start; |
| 1249 | unsigned long end; |
| 1250 | int i; |
| 1251 | |
| 1252 | retval = cpa_clear_pages_array(pages, addrinarray, |
| 1253 | __pgprot(_PAGE_CACHE_MASK)); |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1254 | if (retval) |
| 1255 | return retval; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1256 | |
| 1257 | for (i = 0; i < addrinarray; i++) { |
Thomas Hellstrom | 8523acf | 2009-08-03 09:25:45 +0200 | [diff] [blame] | 1258 | if (PageHighMem(pages[i])) |
| 1259 | continue; |
| 1260 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1261 | end = start + PAGE_SIZE; |
| 1262 | free_memtype(start, end); |
| 1263 | } |
| 1264 | |
venkatesh.pallipadi@intel.com | 9fa3ab3 | 2009-04-09 14:26:49 -0700 | [diff] [blame] | 1265 | return 0; |
venkatesh.pallipadi@intel.com | 0f35075 | 2009-03-19 14:51:15 -0700 | [diff] [blame] | 1266 | } |
| 1267 | EXPORT_SYMBOL(set_pages_array_wb); |
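/*
 * Illustrative sketch (editor's addition, not part of pageattr.c): the
 * set_pages_array_*() helpers operate on struct page pointers, as used by
 * drivers that manage page-backed buffer objects, and silently skip
 * highmem pages, which have no permanent kernel mapping to modify.
 * example_map_bo_uncached() is a hypothetical caller.
 */
static int example_map_bo_uncached(struct page **pages, int npages)
{
	int ret;

	ret = set_pages_array_uc(pages, npages);
	if (ret)
		return ret;

	/* ... hand the now-uncached pages to the device ... */

	return set_pages_array_wb(pages, npages);	/* restore WB before freeing */
}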
| 1268 | |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1269 | int set_pages_x(struct page *page, int numpages) |
| 1270 | { |
| 1271 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1272 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1273 | return set_memory_x(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1274 | } |
| 1275 | EXPORT_SYMBOL(set_pages_x); |
| 1276 | |
| 1277 | int set_pages_nx(struct page *page, int numpages) |
| 1278 | { |
| 1279 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1280 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1281 | return set_memory_nx(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1282 | } |
| 1283 | EXPORT_SYMBOL(set_pages_nx); |
| 1284 | |
| 1285 | int set_pages_ro(struct page *page, int numpages) |
| 1286 | { |
| 1287 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1288 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1289 | return set_memory_ro(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1290 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1291 | |
| 1292 | int set_pages_rw(struct page *page, int numpages) |
| 1293 | { |
| 1294 | unsigned long addr = (unsigned long)page_address(page); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1295 | |
Thomas Gleixner | d7c8f21 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1296 | return set_memory_rw(addr, numpages); |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1297 | } |
Arjan van de Ven | 75cbade | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 1298 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | #ifdef CONFIG_DEBUG_PAGEALLOC |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1300 | |
| 1301 | static int __set_pages_p(struct page *page, int numpages) |
| 1302 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1303 | unsigned long tempaddr = (unsigned long) page_address(page); |
| 1304 | struct cpa_data cpa = { .vaddr = &tempaddr, |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1305 | .numpages = numpages, |
| 1306 | .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1307 | .mask_clr = __pgprot(0), |
| 1308 | .flags = 0}; |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1309 | |
Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1310 | /* |
 | 1311 | * No alias checking needed for setting the present flag. Otherwise, |
| 1312 | * we may need to break large pages for 64-bit kernel text |
| 1313 | * mappings (this adds to complexity if we want to do this from |
| 1314 | * atomic context especially). Let's keep it simple! |
| 1315 | */ |
| 1316 | return __change_page_attr_set_clr(&cpa, 0); |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1317 | } |
| 1318 | |
| 1319 | static int __set_pages_np(struct page *page, int numpages) |
| 1320 | { |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1321 | unsigned long tempaddr = (unsigned long) page_address(page); |
| 1322 | struct cpa_data cpa = { .vaddr = &tempaddr, |
Thomas Gleixner | 72e458d | 2008-02-04 16:48:07 +0100 | [diff] [blame] | 1323 | .numpages = numpages, |
| 1324 | .mask_set = __pgprot(0), |
Shaohua Li | d75586a | 2008-08-21 10:46:06 +0800 | [diff] [blame] | 1325 | .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW), |
| 1326 | .flags = 0}; |
Thomas Gleixner | 72932c7 | 2008-01-30 13:34:08 +0100 | [diff] [blame] | 1327 | |
Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1328 | /* |
 | 1329 | * No alias checking needed for clearing the present flag. Otherwise, |
| 1330 | * we may need to break large pages for 64-bit kernel text |
| 1331 | * mappings (this adds to complexity if we want to do this from |
| 1332 | * atomic context especially). Let's keep it simple! |
| 1333 | */ |
| 1334 | return __change_page_attr_set_clr(&cpa, 0); |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1335 | } |
| 1336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | void kernel_map_pages(struct page *page, int numpages, int enable) |
| 1338 | { |
| 1339 | if (PageHighMem(page)) |
| 1340 | return; |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1341 | if (!enable) { |
Ingo Molnar | f9b8404 | 2006-06-27 02:54:49 -0700 | [diff] [blame] | 1342 | debug_check_no_locks_freed(page_address(page), |
| 1343 | numpages * PAGE_SIZE); |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1344 | } |
Ingo Molnar | de5097c | 2006-01-09 15:59:21 -0800 | [diff] [blame] | 1345 | |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1346 | /* |
Ingo Molnar | 12d6f21 | 2008-01-30 13:33:58 +0100 | [diff] [blame] | 1347 | * If the page allocator is not up yet, do not call c_p_a(): |
| 1348 | */ |
| 1349 | if (!debug_pagealloc_enabled) |
| 1350 | return; |
| 1351 | |
| 1352 | /* |
Ingo Molnar | f8d8406 | 2008-02-13 14:09:53 +0100 | [diff] [blame] | 1353 | * The return value is ignored as the calls cannot fail. |
Suresh Siddha | 55121b4 | 2008-09-23 14:00:40 -0700 | [diff] [blame] | 1354 | * Large pages for identity mappings are not used at boot time |
 | 1355 | * and hence no memory allocation is needed during a large page split. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | */ |
Ingo Molnar | f62d0f0 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1357 | if (enable) |
| 1358 | __set_pages_p(page, numpages); |
| 1359 | else |
| 1360 | __set_pages_np(page, numpages); |
Ingo Molnar | 9f4c815 | 2008-01-30 13:33:41 +0100 | [diff] [blame] | 1361 | |
| 1362 | /* |
Ingo Molnar | e4b71dc | 2008-01-30 13:34:04 +0100 | [diff] [blame] | 1363 | * We should perform an IPI and flush all tlbs, |
 | 1364 | * but that can deadlock, so flush only the current cpu: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | */ |
| 1366 | __flush_tlb_all(); |
| 1367 | } |
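/*
 * Illustrative sketch (editor's addition, not part of pageattr.c): under
 * CONFIG_DEBUG_PAGEALLOC the page allocator calls kernel_map_pages() so
 * that freed pages are unmapped and stray accesses fault immediately,
 * roughly as below. The example_* wrappers are hypothetical.
 */
static void example_poison_on_free(struct page *page, int order)
{
	/* Clear _PAGE_PRESENT: later use-after-free accesses will fault. */
	kernel_map_pages(page, 1 << order, 0);
}

static void example_unpoison_on_alloc(struct page *page, int order)
{
	/* Map the pages again before handing them to the new owner. */
	kernel_map_pages(page, 1 << order, 1);
}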
Rafael J. Wysocki | 8a235ef | 2008-02-20 01:47:44 +0100 | [diff] [blame] | 1368 | |
| 1369 | #ifdef CONFIG_HIBERNATION |
| 1370 | |
| 1371 | bool kernel_page_present(struct page *page) |
| 1372 | { |
| 1373 | unsigned int level; |
| 1374 | pte_t *pte; |
| 1375 | |
| 1376 | if (PageHighMem(page)) |
| 1377 | return false; |
| 1378 | |
| 1379 | pte = lookup_address((unsigned long)page_address(page), &level); |
| 1380 | return (pte_val(*pte) & _PAGE_PRESENT); |
| 1381 | } |
| 1382 | |
| 1383 | #endif /* CONFIG_HIBERNATION */ |
| 1384 | |
| 1385 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
Arjan van de Ven | d1028a1 | 2008-01-30 13:34:07 +0100 | [diff] [blame] | 1386 | |
| 1387 | /* |
| 1388 | * The testcases use internal knowledge of the implementation that shouldn't |
| 1389 | * be exposed to the rest of the kernel. Include these directly here. |
| 1390 | */ |
| 1391 | #ifdef CONFIG_CPA_DEBUG |
| 1392 | #include "pageattr-test.c" |
| 1393 | #endif |