#ifndef _ASM_X86_HUGETLB_H
#define _ASM_X86_HUGETLB_H

#include <asm/page.h>

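/*
 * x86 has no address range reserved exclusively for huge pages
 * (some architectures, e.g. powerpc at the time, carve out a
 * dedicated hugepage region), so this always reports "no".
 */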
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that
 * hugepage-size-aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
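
/*
 * Worked example (illustrative only, assuming 2 MB huge pages, so
 * huge_page_mask(h) == ~0x1fffffUL):
 *
 *	prepare_hugepage_range(file, 0x200000, 0x400000) -> 0
 *	prepare_hugepage_range(file, 0x200000, 0x500000) -> -EINVAL
 *
 * The second call fails because 0x500000 & 0x1fffff != 0, i.e. len
 * is not a multiple of the huge page size.
 */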

/*
 * No-op on x86; the hook exists because generic hugetlb code calls
 * it, and some architectures do per-mm setup here at prefault time.
 */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

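/*
 * Huge pages on x86 live in the ordinary page tables, so tearing
 * down the page-table pages behind a hugetlb VMA is just the
 * generic free_pgd_range().
 */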
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

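/*
 * On x86 a huge PTE is an ordinary PMD (or PUD) entry with the
 * _PAGE_PSE bit set, so the generic PTE accessors can be reused
 * unchanged for the wrappers below.
 */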
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

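/*
 * Intentionally empty on x86; architectures that must flush the TLB
 * explicitly when a huge PTE is torn down (s390, for instance)
 * provide a real implementation.
 */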
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

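/*
 * Nothing to do per page on x86; these hooks exist for architectures
 * (e.g. s390) that need to prepare or release extra per-page state
 * for huge pages.
 */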
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_X86_HUGETLB_H */