#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
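
/*
 * Illustrative sketch (not part of the original header): a subpool is
 * created per mount and charged as huge pages are allocated against it.
 * Hypothetical usage of the constructor/destructor declared below,
 * assuming a limit of 1024 pages and a floor of 256:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 1024, 256);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	(drops the creator's reference)
 */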

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
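
/*
 * Lifetime sketch (illustrative, not from the original header): the map
 * returned by resv_map_alloc() starts with refs == 1 and is freed via
 * the embedded kref once the last user puts its reference:
 *
 *	struct resv_map *map = resv_map_alloc();
 *	if (!map)
 *		return -ENOMEM;
 *	kref_get(&map->refs);				(second user)
 *	kref_put(&map->refs, resv_map_release);		(first put)
 *	kref_put(&map->refs, resv_map_release);		(last put frees map)
 */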

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
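
/*
 * Example (illustrative): walking every registered huge page size, e.g.
 * to print per-hstate counters. hugetlb_max_hstate bounds the portion
 * of hstates[] that has actually been initialized.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */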

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
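
/*
 * Usage sketch (illustrative; the exact caller lives in mm/hugetlb.c):
 * the hash selects one mutex from hugetlb_fault_mutex_table so that
 * faults on the same (mapping, index) are serialized:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx,
 *					    address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */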

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example, commit a4fe3ce76
 * ("powerpc/mm: Allow more flexible layouts for hugepage pagetables")
 * introduced it on powerpc, allowing for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file, so shmfs accounting
	 * rules apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
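
/*
 * Illustrative example (hypothetical call, argument names taken from
 * the hugetlb_file_setup() declaration below): callers pass one of the
 * flags above as creat_flags. An anonymous MAP_HUGETLB mapping would
 * look roughly like:
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, vm_flags,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  page_size_log);
 */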

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
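
/*
 * Worked example (illustrative, assuming 4KB base pages and the common
 * 2MB huge page, i.e. h->order == 9):
 *
 *	huge_page_size(h)	= 4096 << 9	= 2MB
 *	huge_page_shift(h)	= 9 + 12	= 21
 *	pages_per_huge_page(h)	= 1 << 9	= 512
 *	blocks_per_huge_page(h)	= 2MB / 512	= 4096 (512-byte blocks)
 */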

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
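
/*
 * Typical guard (illustrative sketch; the errno is an example, not a
 * contract): code exposing hugetlb interfaces can bail out early when
 * the platform booted without huge page support:
 *
 *	if (!hugepages_supported())
 *		return -ENOTSUPP;
 */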

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
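
/*
 * Locking sketch (illustrative): huge_pte_lock() takes whichever lock
 * huge_pte_lockptr() selects (the split PMD lock or the coarse
 * mm->page_table_lock) and returns it so the caller can unlock:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... operate on the huge PTE ...
 *	spin_unlock(ptl);
 */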
533
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534#endif /* _LINUX_HUGETLB_H */