blob: d030548047e21d6f955d459d6b10b94f4a3bc799 [file] [log] [blame]
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in asm-generic/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */
8
9#include <asm/tlb.h>
10#include <asm-generic/pgtable.h>
11
12#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
13/*
14 * Only sets the access flags (dirty, accessed, and
15 * writable). Furthermore, we know it always gets set to a "more
16 * permissive" setting, which allows most architectures to optimize
17 * this. We return whether the PTE actually changed, which in turn
18 * instructs the caller to do things like update__mmu_cache. This
19 * used to be done in the caller, but sparc needs minor faults to
20 * force that call on sun4c so we changed this macro slightly
21 */
22int ptep_set_access_flags(struct vm_area_struct *vma,
23 unsigned long address, pte_t *ptep,
24 pte_t entry, int dirty)
25{
26 int changed = !pte_same(*ptep, entry);
27 if (changed) {
28 set_pte_at(vma->vm_mm, address, ptep, entry);
29 flush_tlb_page(vma, address);
30 }
31 return changed;
32}
33#endif
34
35#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
36int pmdp_set_access_flags(struct vm_area_struct *vma,
37 unsigned long address, pmd_t *pmdp,
38 pmd_t entry, int dirty)
39{
40#ifdef CONFIG_TRANSPARENT_HUGEPAGE
41 int changed = !pmd_same(*pmdp, entry);
42 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
43 if (changed) {
44 set_pmd_at(vma->vm_mm, address, pmdp, entry);
45 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
46 }
47 return changed;
48#else /* CONFIG_TRANSPARENT_HUGEPAGE */
49 BUG();
50 return 0;
51#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
52}
53#endif
54
55#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
56int ptep_clear_flush_young(struct vm_area_struct *vma,
57 unsigned long address, pte_t *ptep)
58{
59 int young;
60 young = ptep_test_and_clear_young(vma, address, ptep);
61 if (young)
62 flush_tlb_page(vma, address);
63 return young;
64}
65#endif
66
67#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
68int pmdp_clear_flush_young(struct vm_area_struct *vma,
69 unsigned long address, pmd_t *pmdp)
70{
71 int young;
72#ifndef CONFIG_TRANSPARENT_HUGEPAGE
73 BUG();
74#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
75 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
76 young = pmdp_test_and_clear_young(vma, address, pmdp);
77 if (young)
78 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
79 return young;
80}
81#endif
82
83#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
84pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
85 pte_t *ptep)
86{
87 pte_t pte;
88 pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
89 flush_tlb_page(vma, address);
90 return pte;
91}
92#endif
93
94#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
95pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
96 pmd_t *pmdp)
97{
98 pmd_t pmd;
99#ifndef CONFIG_TRANSPARENT_HUGEPAGE
100 BUG();
101#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
102 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
103 pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
104 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
105 return pmd;
106}
107#endif
108
109#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
110pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
111 pmd_t *pmdp)
112{
113#ifdef CONFIG_TRANSPARENT_HUGEPAGE
114 pmd_t pmd = pmd_mksplitting(*pmdp);
115 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
116 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
117 /* tlb flush only to serialize against gup-fast */
118 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
119#else /* CONFIG_TRANSPARENT_HUGEPAGE */
120 BUG();
121#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
122}
123#endif