#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

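/*
 * Allocate a page for a kernel pagetable.  Kernel PTE pages always come
 * from lowmem (PGALLOC_GFP has no __GFP_HIGHMEM), since the kernel must
 * address them directly, and are returned zeroed (__GFP_ZERO).
 */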
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

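/*
 * Allocate a page for a user pagetable.  These may come from highmem when
 * CONFIG_HIGHPTE is enabled (see __userpte_alloc_gfp above), and must go
 * through pgtable_page_ctor() so the split page-table lock is initialized.
 */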
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

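/*
 * Handler for the "userpte=" boot parameter; "nohigh" is the only
 * recognized value, e.g. boot with "userpte=nohigh" to keep user
 * pagetables out of highmem on CONFIG_HIGHPTE kernels.
 */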
static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

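/*
 * Free a pte page via the mmu_gather: run the destructor (releasing the
 * split ptlock), tell any paravirt backend the pfn is going away, and let
 * tlb_remove_page() defer the actual free until the TLB has been flushed.
 */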
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

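/*
 * Number of pgd entries that are private to each pagetable: when the
 * kernel pmd is shared (SHARED_KERNEL_PMD), only the user portion below
 * KERNEL_PGD_BOUNDARY is per-process; otherwise every entry is.
 */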
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


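/*
 * Stash the owning mm in page->index of the pgd's page so that code
 * walking pgd_list can map a pgd back to its mm; the field is unused
 * for pagetable pages, and the BUILD_BUG_ON checks it is wide enough.
 */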
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
		}
}

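/*
 * Preallocate the PREALLOCATED_PMDS pmd pages for a new pgd in one pass.
 * On failure the loop keeps going, leaving NULLs in the array, so that a
 * single free_pmds() call can clean up whatever was allocated.
 */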
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

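/*
 * Allocate and set up a fresh pgd: get a zeroed page, preallocate the
 * pmds, then populate everything under pgd_lock so walkers of pgd_list
 * never see a half-constructed pgd.
 */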
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to be more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

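/*
 * Atomically clear the accessed bit and report whether it was set.
 * Page reclaim uses this to age pages; the bit is cleared with a locked
 * bit operation directly on the pte word.
 */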
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

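/*
 * As above, but also flush the TLB entry when the accessed bit was
 * cleared: the CPU only sets the bit on a TLB fill, so a stale entry
 * would let later accesses go unnoticed by reclaim.
 */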
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

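/*
 * Mark a huge pmd as being split so that pagetable walkers back off.
 * The _PAGE_BIT_SPLITTING bit is set atomically; see the comment below
 * on why a TLB flush is still required.
 */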
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

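/*
 * Install the pte for a fixmap slot.  Fixmap indices map to fixed
 * virtual addresses via __fix_to_virt(); an out-of-range index is a
 * hard bug.
 */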
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}