blob: 3a106165e03ad035484df5fb97989265798263df [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_HUGETLB_H
2#define _ASM_X86_HUGETLB_H
Gerald Schaefer6d779072008-04-28 02:13:27 -07003
4#include <asm/page.h>
Gerald Schaefer106c9922013-04-29 15:07:23 -07005#include <asm-generic/hugetlb.h>
Gerald Schaefer6d779072008-04-28 02:13:27 -07006
/* Huge pages on x86 require the PSE (Page Size Extension) CPU feature. */
#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
Gerald Schaefer6d779072008-04-28 02:13:27 -07008
/*
 * x86 reserves no address ranges exclusively for huge pages: any range
 * may also hold normal-sized mappings, so always report "no".
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}
14
15/*
16 * If the arch doesn't supply something else, assume that hugepage
17 * size aligned regions are ok without further preparation.
18 */
Andi Kleena5516432008-07-23 21:27:41 -070019static inline int prepare_hugepage_range(struct file *file,
20 unsigned long addr, unsigned long len)
Gerald Schaefer6d779072008-04-28 02:13:27 -070021{
Andi Kleena5516432008-07-23 21:27:41 -070022 struct hstate *h = hstate_file(file);
23 if (len & ~huge_page_mask(h))
Gerald Schaefer6d779072008-04-28 02:13:27 -070024 return -EINVAL;
Andi Kleena5516432008-07-23 21:27:41 -070025 if (addr & ~huge_page_mask(h))
Gerald Schaefer6d779072008-04-28 02:13:27 -070026 return -EINVAL;
27 return 0;
28}
29
/*
 * Tear down the page-table pages backing a huge mapping.  x86 needs no
 * special handling, so defer to the generic free_pgd_range().
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
37
/* Install a huge-page PTE; identical to the normal set_pte_at() on x86. */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
43
/* Atomically clear a huge-page PTE and return its previous value. */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}
49
/*
 * Clear a huge-page PTE and flush the TLB for it.  The cleared PTE value
 * returned by ptep_clear_flush() is intentionally discarded here.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}
55
/* Test whether a huge-page PTE is empty; same as pte_none() on x86. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
60
/* Return a write-protected copy of a huge-page PTE value. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
65
/* Write-protect a huge-page PTE in place via the normal PTE helper. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
71
/*
 * Update access/dirty flags on a huge-page PTE.  Returns the value of
 * ptep_set_access_flags(), i.e. nonzero when the entry was changed and
 * a TLB flush is required.
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
78
/* Read a huge-page PTE; a plain dereference suffices on x86. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
83
/* x86 keeps no per-page arch state for huge pages, so this is a no-op. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
87
H. Peter Anvin1965aae2008-10-22 22:26:29 -070088#endif /* _ASM_X86_HUGETLB_H */