blob: 68c05398bba9b449a1324d54b584ce52d52aa8d1 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_HUGETLB_H
2#define _ASM_X86_HUGETLB_H
Gerald Schaefer6d779072008-04-28 02:13:27 -07003
4#include <asm/page.h>
Gerald Schaefer106c9922013-04-29 15:07:23 -07005#include <asm-generic/hugetlb.h>
Gerald Schaefer6d779072008-04-28 02:13:27 -07006
7
/*
 * x86 has no address ranges reserved exclusively for huge pages, so no
 * range is ever "hugepage only".
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
13
14/*
15 * If the arch doesn't supply something else, assume that hugepage
16 * size aligned regions are ok without further preparation.
17 */
Andi Kleena5516432008-07-23 21:27:41 -070018static inline int prepare_hugepage_range(struct file *file,
19 unsigned long addr, unsigned long len)
Gerald Schaefer6d779072008-04-28 02:13:27 -070020{
Andi Kleena5516432008-07-23 21:27:41 -070021 struct hstate *h = hstate_file(file);
22 if (len & ~huge_page_mask(h))
Gerald Schaefer6d779072008-04-28 02:13:27 -070023 return -EINVAL;
Andi Kleena5516432008-07-23 21:27:41 -070024 if (addr & ~huge_page_mask(h))
Gerald Schaefer6d779072008-04-28 02:13:27 -070025 return -EINVAL;
26 return 0;
27}
28
/* No arch-specific prefault work is needed on x86. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
31
/*
 * Huge-page page tables on x86 are torn down exactly like regular page
 * tables; delegate straight to the generic routine.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
39
40static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
41 pte_t *ptep, pte_t pte)
42{
43 set_pte_at(mm, addr, ptep, pte);
44}
45
46static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
47 unsigned long addr, pte_t *ptep)
48{
49 return ptep_get_and_clear(mm, addr, ptep);
50}
51
Gerald Schaefer8fe627e2008-04-28 02:13:28 -070052static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
53 unsigned long addr, pte_t *ptep)
54{
Anthony Iliopoulos9844f542014-05-14 11:29:48 +020055 ptep_clear_flush(vma, addr, ptep);
Gerald Schaefer8fe627e2008-04-28 02:13:28 -070056}
57
Gerald Schaefer7f2e9522008-04-28 02:13:29 -070058static inline int huge_pte_none(pte_t pte)
59{
60 return pte_none(pte);
61}
62
63static inline pte_t huge_pte_wrprotect(pte_t pte)
64{
65 return pte_wrprotect(pte);
66}
67
68static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
69 unsigned long addr, pte_t *ptep)
70{
71 ptep_set_wrprotect(mm, addr, ptep);
72}
73
74static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
75 unsigned long addr, pte_t *ptep,
76 pte_t pte, int dirty)
77{
78 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
79}
80
81static inline pte_t huge_ptep_get(pte_t *ptep)
82{
83 return *ptep;
84}
85
/* No arch-specific preparation is required for a new huge page. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}
90
/* No arch-specific teardown is required when releasing a huge page. */
static inline void arch_release_hugepage(struct page *page)
{
}
94
Will Deacon5d3a5512012-10-08 16:29:32 -070095static inline void arch_clear_hugepage_flags(struct page *page)
96{
97}
98
H. Peter Anvin1965aae2008-10-22 22:26:29 -070099#endif /* _ASM_X86_HUGETLB_H */