#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
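
/*
 * Illustrative lifetime sketch (based on how callers in mm/hugetlb.c use
 * these helpers; not a new interface):
 *
 *	struct resv_map *resv = resv_map_alloc();
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_get(&resv->refs);
 *	kref_put(&resv->refs, resv_map_release);
 *
 * resv_map_release() is the kref release callback; it tears down the
 * region list once the last reference to the reservation map is gone.
 */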

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
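
/*
 * Example (illustrative only) of walking every registered huge page size
 * with the iterator above:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu bytes\n", h->name, huge_page_size(h));
 *
 * hugetlb_max_hstate counts the hstates registered so far, so the loop
 * only visits initialized entries of hstates[].
 */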

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
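
/*
 * Subpool lifecycle sketch, as done by hugetlbfs at mount and unmount
 * time (illustrative; error handling trimmed):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * hugepage_put_subpool() drops the creator's reference; the subpool is
 * freed once no pages remain charged against it.
 */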

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
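
/*
 * Serialization sketch (illustrative): faults on the same mapping/index
 * hash to a single mutex in hugetlb_fault_mutex_table, so racing faults
 * on one huge page are serialized while faults on different pages can
 * proceed in parallel:
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */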

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
			unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example, commit a4fe3ce76
 * ("powerpc/mm: Allow more flexible layouts for hugepage pagetables")
 * introduced this on powerpc, allowing for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount, so shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
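
/*
 * Example (illustrative): mmap(MAP_HUGETLB) and shmget(SHM_HUGETLB)
 * encode the requested page size as its log2 in the high bits of the
 * flags argument, so a request for 2 MB pages reaches here as
 * page_size_log == 21:
 *
 *	struct hstate *h = hstate_sizelog(21);	(2 MB, if configured)
 *
 * A page_size_log of 0 selects the default huge page size, and an
 * unsupported size makes size_to_hstate() return NULL.
 */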

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}
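
/*
 * Worked example (illustrative): with 4 KB base pages and MAX_ORDER == 11,
 * a 2 MB hstate has order 9 and is not gigantic, while a 1 GB hstate has
 * order 18 and is. Gigantic hstates cannot come from the buddy allocator,
 * so their pages are set up at boot or from contiguous ranges instead.
 */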

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
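
/*
 * Worked numbers (illustrative) for a 2 MB hstate on a 4 KB PAGE_SIZE
 * system: order = 9, huge_page_shift() = 21, pages_per_huge_page() =
 * 1 << 9 = 512, and blocks_per_huge_page() = 2 MB / 512 = 4096 sectors
 * of 512 bytes.
 */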

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
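
/*
 * Worked example (illustrative): for base page 3 of a 2 MB huge page
 * whose head page has index 5, __basepage_index() returns
 * 5 * 512 + 3 = 2563, i.e. the offset in PAGE_SIZE units. The futex
 * code relies on this so that keys within one huge page stay distinct.
 */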

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
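
/*
 * Typical locking pattern around the helper above (illustrative):
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge pte ...
 *	spin_unlock(ptl);
 *
 * For PMD-sized pages this takes the split PMD lock; larger sizes fall
 * back to mm->page_table_lock (see huge_pte_lockptr()).
 */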

#endif /* _LINUX_HUGETLB_H */