/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
	struct page	**pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
 * using cpa_lock, so that no other CPU with stale large TLB entries can
 * change the page attributes in parallel while another CPU is splitting a
 * large page entry and changing the attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	unsigned long flags;

	/* Protect against CPA */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

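/*
 * Illustrative sketch (not part of this file): a caller that has written a
 * small descriptor through a write-back mapping and needs the data pushed
 * out of the cache can flush just that range instead of a full wbinvd.
 * The descriptor name below is hypothetical:
 *
 *	my_desc->status = 1;
 *	clflush_cache_range(my_desc, sizeof(*my_desc));
 *
 * The range may be arbitrarily aligned; the loop above steps in units of
 * the CPU cacheline size and flushes the final partial cacheline as well.
 */
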
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;
	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

	if (!cache || do_wbinvd)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *)addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons
	 * Does not cover __inittext since that is gone later on. On
	 * 64bit we do not enforce !NX on the low mapping
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

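/*
 * Illustrative sketch (not part of this file): static_protections() is why a
 * request cannot accidentally weaken a mandatory protection. For example a
 * hypothetical caller trying to make .rodata writable:
 *
 *	set_memory_rw((unsigned long)__start_rodata, 1);
 *
 * would have _PAGE_RW filtered back out of the new pgprot for any pfn that
 * aliases .rodata, so the page stays read-only.
 */
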
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

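/*
 * Illustrative sketch (not part of this file): a debugging helper might use
 * lookup_address() to report how a kernel virtual address is currently
 * mapped. The helper name below is hypothetical:
 *
 *	static void report_mapping(unsigned long addr)
 *	{
 *		unsigned int level;
 *		pte_t *pte = lookup_address(addr, &level);
 *
 *		if (!pte || !(pte_val(*pte) & _PAGE_PRESENT))
 *			pr_info("%lx: not mapped\n", addr);
 *		else
 *			pr_info("%lx: mapped at level %d\n", addr, level);
 *	}
 *
 * Remember that for a large mapping the returned pointer is really the
 * pmd/pud entry, not a 4K pte.
 */
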
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protection() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. maxpages has been updated
	 * above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	if (!debug_pagealloc)
		spin_unlock(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	if (!debug_pagealloc)
		spin_lock(&cpa_lock);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	/*
	 * If we ever want to utilize the PAT bit, we need to
	 * update this function to make sure it's converted from
	 * bit 12 to bit 7 when we cross from the 2MB level to
	 * the 4K level:
	 */
	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	if (address >= (unsigned long)__va(0) &&
	    address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);

#ifdef CONFIG_X86_64
	if (address >= (unsigned long)__va(1UL<<32) &&
	    address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
		split_page_count(level);
#endif

	/*
	 * Install the new, split up pagetable.
	 *
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

	/*
	 * Intel Atom errata AAH41 workaround.
	 *
	 * The real fix should be in hw or in a microcode update, but
	 * we also probabilistically try to reduce the window of having
	 * a large TLB mixed with 4K TLBs while instruction fetches are
	 * going on.
	 */
	__flush_tlb_all();

	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base)
		__free_page(base);
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	/*
	 * Ignore all non primary paths.
	 */
	if (!primary)
		return 0;

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	if (cpa->flags & CPA_PAGES_ARRAY)
		address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
	else if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;
repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (!pte_val(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;
		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flags have been updated in
	 * try_preserve_large_page:
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		/*
		 * Do a global flush tlb after splitting the large page
		 * and before we do the actual change page attribute in the PTE.
		 *
		 * Without this, we violate the TLB application note, which says
		 * "The TLBs may contain both ordinary and large-page
		 * translations for a 4-KByte range of linear addresses. This
		 * may occur if software modifies the paging structures so that
		 * the page size used for the address range changes. If the two
		 * translations differ with respect to page frame or attributes
		 * (e.g., permissions), processor behavior is undefined and may
		 * be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu, with stale tlb entries, to change
		 * the page attribute in parallel for a page that also falls
		 * into the just split large page entry.
		 */
		flush_tlb_all();
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;
	unsigned long temp_cpa_vaddr, vaddr;

	if (cpa->pfn >= max_pfn_mapped)
		return 0;

#ifdef CONFIG_X86_64
	if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
		return 0;
#endif
	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_PAGES_ARRAY)
		vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
	else if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

		alias_cpa = *cpa;
		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
		alias_cpa.vaddr = &temp_cpa_vaddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(vaddr, (unsigned long) _text, _brk_end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
	alias_cpa.vaddr = &temp_cpa_vaddr;
	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		if (!debug_pagealloc)
			spin_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc)
			spin_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case.
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	/*
	 * If we're called with lazy mmu updates enabled, the
	 * in-memory pte state may be stale. Flush pending updates to
	 * bring them up to date.
	 */
	arch_flush_lazy_mmu_mode();

	cpa.vaddr = addr;
	cpa.pages = pages;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!(cpa.flags & CPA_FLUSHTLB))
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush) {
		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
			cpa_flush_array(addr, numpages, cache,
					cpa.flags, pages);
		} else
			cpa_flush_range(*addr, numpages, cache);
	} else
		cpa_flush_all(cache);

	/*
	 * If we've been called with lazy mmu updates enabled, then
	 * make sure that everything gets flushed out before we
	 * return.
	 */
	arch_flush_lazy_mmu_mode();

out:
	return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				      pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	return change_page_attr_set(&addr, numpages,
				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	int ret;

	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_UC_MINUS, NULL);
	if (ret)
		goto out_err;

	ret = _set_memory_uc(addr, numpages);
	if (ret)
		goto out_free;

	return 0;

out_free:
	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
	return ret;
}
EXPORT_SYMBOL(set_memory_uc);

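/*
 * Illustrative sketch (not part of this file): a typical caller pairs
 * set_memory_uc() with set_memory_wb() around the lifetime of a buffer that
 * must not be cached. The buffer name below is hypothetical:
 *
 *	unsigned long buf = (unsigned long)dma_buf;	page-aligned address
 *
 *	if (set_memory_uc(buf, nr_pages))
 *		return -EIO;
 *	... use the buffer uncached ...
 *	set_memory_wb(buf, nr_pages);
 *
 * The reserve_memtype()/free_memtype() calls above keep the PAT tracking in
 * sync, so callers are expected to restore WB when they are done.
 */
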
int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
	int i, j;
	int ret;

	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
	for (i = 0; i < addrinarray; i++) {
		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
					_PAGE_CACHE_UC_MINUS, NULL);
		if (ret)
			goto out_free;
	}

	ret = change_page_attr_set(addr, addrinarray,
				   __pgprot(_PAGE_CACHE_UC_MINUS), 1);
	if (ret)
		goto out_free;

	return 0;

out_free:
	for (j = 0; j < i; j++)
		free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

	return ret;
}
EXPORT_SYMBOL(set_memory_array_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
	int ret;
	ret = change_page_attr_set(&addr, numpages,
				   __pgprot(_PAGE_CACHE_UC_MINUS), 0);

	if (!ret) {
		ret = change_page_attr_set(&addr, numpages,
					   __pgprot(_PAGE_CACHE_WC), 0);
	}
	return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	if (!pat_enabled)
		return set_memory_uc(addr, numpages);

	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			      _PAGE_CACHE_WC, NULL);
	if (ret)
		goto out_err;

	ret = _set_memory_wc(addr, numpages);
	if (ret)
		goto out_free;

	return 0;

out_free:
	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
	return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
				      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
	int ret;

	ret = _set_memory_wb(addr, numpages);
	if (ret)
		return ret;

	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
	int i;
	int ret;

	ret = change_page_attr_clear(addr, addrinarray,
				     __pgprot(_PAGE_CACHE_MASK), 1);
	if (ret)
		return ret;

	for (i = 0; i < addrinarray; i++)
		free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

	return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
					__pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_array_uc(struct page **pages, int addrinarray)
{
	unsigned long start;
	unsigned long end;
	int i;
	int free_idx;

	for (i = 0; i < addrinarray; i++) {
		start = (unsigned long)page_address(pages[i]);
		end = start + PAGE_SIZE;
		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
			goto err_out;
	}

	if (cpa_set_pages_array(pages, addrinarray,
			__pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
		return 0; /* Success */
	}
err_out:
	free_idx = i;
	for (i = 0; i < free_idx; i++) {
		start = (unsigned long)page_address(pages[i]);
		end = start + PAGE_SIZE;
		free_memtype(start, end);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(set_pages_array_uc);

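/*
 * Illustrative sketch (not part of this file): the array variants exist so
 * that a caller changing many scattered pages pays for the TLB/cache flush
 * once rather than once per page. Hypothetical usage:
 *
 *	struct page *pages[NR];		filled in by the caller
 *	int ret;
 *
 *	ret = set_pages_array_uc(pages, NR);
 *	if (ret)
 *		return ret;
 *	... map and use the pages uncached ...
 *	set_pages_array_wb(pages, NR);
 *
 * set_pages_array_wb() below undoes both the attribute change and the
 * memtype reservations taken here.
 */
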
int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
	int retval;
	unsigned long start;
	unsigned long end;
	int i;

	retval = cpa_clear_pages_array(pages, addrinarray,
				       __pgprot(_PAGE_CACHE_MASK));
	if (retval)
		return retval;

	for (i = 0; i < addrinarray; i++) {
		start = (unsigned long)page_address(pages[i]);
		end = start + PAGE_SIZE;
		free_memtype(start, end);
	}

	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking needed for setting present flag. otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking needed for setting not present flag. otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages for identity mappings are not used at boot time
	 * and hence no memory allocations during large page split.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu:
	 */
	__flush_tlb_all();
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif