/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HUGETLB_H
#define _ASM_X86_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

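/* hugetlbfs is usable only if the CPU has the Page Size Extension (PSE). */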
#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)

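/*
 * x86 reserves no address range exclusively for huge pages; any range
 * a task can map may contain them.
 */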
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

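/*
 * Free the page-table pages that backed an unmapped hugetlb region.
 * x86 needs nothing beyond the generic pgd-range free.
 */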
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

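/*
 * On x86 a huge page is mapped by a single PMD (2/4MB) or PUD (1GB)
 * entry whose bit layout matches a PTE, so the huge_* hooks below can
 * forward directly to the generic pte_* helpers.
 */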
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

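/* Clear the entry and return the old value, e.g. when unmapping. */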
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

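/* Clear the entry and flush the stale TLB entries for this mapping. */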
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}

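/* True if no huge page is mapped at this slot. */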
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

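/* Return a read-only copy of the entry, e.g. when forking a private mapping. */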
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

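/* Write-protect the entry in place in the page tables. */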
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

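/*
 * Update the entry's access/dirty/write bits; returns nonzero if it
 * changed, in which case the caller must flush the TLB.
 */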
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

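/* Read the current entry from the page table. */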
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

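/* x86 keeps no arch-specific page-flag state for huge pages; nothing to clear. */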
static inline void arch_clear_hugepage_flags(struct page *page)
{
}

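/* With CONFIG_ARCH_HAS_GIGANTIC_PAGE, x86 always supports 1GB (PUD-level) pages. */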
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void) { return true; }
#endif

#endif /* _ASM_X86_HUGETLB_H */