#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};
34
Joonsoo Kim9119a412014-04-03 14:47:25 -070035struct resv_map {
36 struct kref refs;
Davidlohr Bueso7b24d862014-04-03 14:47:27 -070037 spinlock_t lock;
Joonsoo Kim9119a412014-04-03 14:47:25 -070038 struct list_head regions;
Mike Kravetz5e911372015-09-08 15:01:28 -070039 long adds_in_progress;
40 struct list_head region_cache;
41 long region_cache_count;
Joonsoo Kim9119a412014-04-03 14:47:25 -070042};
43extern struct resv_map *resv_map_alloc(void);
44void resv_map_release(struct kref *ref);
45
Aneesh Kumar K.Vc3f38a32012-07-31 16:42:10 -070046extern spinlock_t hugetlb_lock;
47extern int hugetlb_max_hstate __read_mostly;
48#define for_each_hstate(h) \
49 for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
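
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * walking every registered huge page size with for_each_hstate().
 * Assumes a context where the hstates[] array has been populated at boot.
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu bytes per huge page\n",
 *                      h->name, huge_page_size(h));
 */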

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
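
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * a filesystem-style user creating a subpool capped at 1024 huge pages
 * with 16 guaranteed, then releasing it. The numbers are arbitrary; this
 * mirrors how hugetlbfs wires a subpool into its superblock info.
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = hugepage_new_subpool(&default_hstate, 1024, 16);
 *      if (!spool)
 *              return -ENOMEM;
 *      ...
 *      hugepage_put_subpool(spool);
 */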

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
                                pgoff_t idx, unsigned long address);
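
/*
 * Illustrative sketch (an assumption, not in the original header): how a
 * fault path serializes against racing faults on the same page-cache
 * index, following the hash-then-lock pattern this table exists for.
 *
 *      u32 hash;
 *
 *      hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ... handle the fault for (mapping, idx) ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */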

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
                                        pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)   ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
#define follow_huge_pud(mm, addr, pud, flags)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
#define huge_pte_offset(mm, address)    0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)      do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory (PGD) level. Architectures
 * that support hugepages at the PGD level need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, commit a4fe3ce76 ("powerpc/mm:
 * Allow more flexible layouts for hugepage pagetables") introduced
 * this on powerpc, allowing a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                              unsigned pdshift, unsigned long end,
                              int write, struct page **pages, int *nr)
{
        return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                       unsigned pdshift, unsigned long end,
                       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file, so shmfs accounting
         * rules apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
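
/*
 * Illustrative sketch (an assumption, not in the original header):
 * page_size_log is the log2 of the requested page size, as encoded in
 * the MAP_HUGE_SHIFT bits of mmap() flags, so a caller asking for 2 MB
 * pages would resolve its hstate like this:
 *
 *      struct hstate *h;
 *
 *      h = hstate_sizelog(21);         (1UL << 21 == 2 MB)
 *      if (!h)
 *              ... no hstate registered for this size; fail the request ...
 */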

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
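
/*
 * Worked example (illustrative, assuming x86-64 with PAGE_SHIFT == 12):
 * a 2 MB hstate has order 9, so
 *
 *      huge_page_size(h)       == 4096UL << 9   == 2097152 (2 MB)
 *      huge_page_shift(h)      == 9 + 12        == 21
 *      pages_per_huge_page(h)  == 1 << 9        == 512 base pages
 *      blocks_per_huge_page(h) == 2097152 / 512 == 4096 512-byte sectors
 */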

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
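
/*
 * Illustrative sketch (an assumption, not in the original header): for a
 * 2 MB huge page cached at huge-page index 3, the head page's index is 3
 * in 2 MB units, while basepage_index() rescales to 4 KB units:
 *
 *      basepage_index(head)     == 3 * 512 == 1536
 *      basepage_index(head + 1) == 1537    (first tail page)
 *
 * This is what lets, e.g., futex keys use a uniform PAGE_SIZE index.
 */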

extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}
#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)  0
#define hugepage_migration_supported(h) false

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
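
/*
 * Illustrative sketch (an assumption, not in the original header): the
 * usual pairing of huge_pte_lock() with spin_unlock() around a huge PTE
 * update, so the split-PMD lock (or the mm->page_table_lock fallback)
 * chosen by huge_pte_lockptr() is taken and released consistently.
 *
 *      spinlock_t *ptl;
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      ... read or update the huge PTE at ptep ...
 *      spin_unlock(ptl);
 */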

#endif /* _LINUX_HUGETLB_H */