#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#include <asm/page.h>

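/*
 * Look up the PTE that maps 'addr' in mm's hugepage region.  On success
 * the page-size shift of the mapping is returned via 'shift', i.e. the
 * entry maps 1UL << shift bytes.  Implemented in
 * arch/powerpc/mm/hugetlbpage.c.
 *
 * Illustrative sketch only (hypothetical caller; assumes the usual mm
 * locking is already held, as the real callers arrange):
 *
 *	unsigned shift;
 *	pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *	if (ptep && !huge_pte_none(huge_ptep_get(ptep)))
 *		size = 1UL << shift;
 */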
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

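/*
 * Flush the data and instruction cache for each base page making up the
 * given hugepage; on powerpc the icache is not coherent with the dcache,
 * so this must happen before a freshly written hugepage is mapped
 * executable.
 */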
void flush_dcache_icache_hugepage(struct page *page);

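/*
 * Report whether any part of [addr, addr + len) lies in an address
 * range that this mm may only use for hugepage mappings (on 64-bit
 * powerpc, slices of the address space carry a fixed page size).
 */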
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

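/*
 * Free the page tables backing the hugepage range [addr, end).  'floor'
 * and 'ceiling' bound the region whose page tables may actually be
 * freed, as for the generic free_pgd_range().
 */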
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that regions
 * aligned to the hugepage size are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

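/*
 * On powerpc a hugepage PTE has the same layout as a normal PTE, so
 * installing one is just a normal set_pte_at().
 */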
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

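/*
 * Atomically clear the PTE and return its previous contents:
 * pte_update() with a clear-mask of ~0UL removes every bit, and the
 * trailing argument flags the entry as a hugepage so the MMU flush is
 * done at the correct page size.
 */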
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
	return __pte(old);
}

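/*
 * Clear the PTE, then flush the TLB entry for 'addr' so that no stale
 * hugepage translation survives.
 */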
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

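/*
 * The helpers below are thin wrappers: since hugepage PTEs share the
 * normal PTE format, the generic PTE predicates and transforms apply
 * unchanged.
 */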
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

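/*
 * Update the accessed/dirty/write bits of a hugepage PTE; returns
 * nonzero if the entry was actually changed.  Again this simply reuses
 * the normal-page implementation.
 */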
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

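/*
 * Hooks for architectures (e.g. s390) that must prepare a page before
 * it can be used as a hugepage; powerpc needs no such setup, so these
 * are no-ops.
 */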
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_POWERPC_HUGETLB_H */