#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

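/*
 * Accounting for one hugetlbfs mount (summary comment added for
 * clarity): @count is a reference count on the subpool itself;
 * @max_hpages is the maximum number of huge pages the mount may use,
 * or -1 for no maximum; @used_hpages counts pages charged against
 * that maximum, including reserved pages. @lock protects the counters.
 */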
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

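/*
 * A minimal iteration sketch (illustrative, not part of the original
 * file): walk every registered huge page size.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */
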
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
				  struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);

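/*
 * Note added for context: with CONFIG_ARCH_WANT_HUGE_PMD_SHARE, tasks
 * mapping the same suitably aligned range of a shared hugetlb mapping
 * may share the PMD page that holds its huge PTEs.
 */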
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file, so shmfs accounting
	 * rules apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount; shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};
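/*
 * Note added for context: these values are passed to
 * hugetlb_file_setup() through its creat_flags argument.
 */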

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

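/*
 * Map a page-size log2 to its hstate; 0 selects the default huge page
 * size. (Note added for context: non-zero values typically come from
 * the MAP_HUGE_SHIFT/SHM_HUGE_SHIFT encodings used with mmap() and
 * shmget().)
 */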
static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;
	/* 1UL: the shift can exceed 31 (e.g. 34 for 16GB pages) */
	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

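/*
 * Note added for context: i_blocks-style accounting is kept in
 * 512-byte units, hence the divisor below.
 */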
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

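/*
 * Note added for clarity: architectures may override
 * arch_make_huge_pte in <asm/hugetlb.h>; the fallback below leaves
 * the huge PTE unmodified.
 */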
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Currently hugepage migration is enabled only for pmd-based hugepages.
 * This function will be updated when hugepage migration is more widely
 * supported.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}

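/*
 * Note added for clarity: PMD-sized huge pages use the (possibly
 * split) PMD page-table lock; other sizes fall back to
 * mm->page_table_lock.
 */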
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define pmd_huge_support()	0
#define hugepage_migration_support(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
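
/*
 * A minimal locking sketch (illustrative, not part of the original
 * file): callers unlock through the returned pointer.
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge PTE at ptep ...
 *	spin_unlock(ptl);
 */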

#endif /* _LINUX_HUGETLB_H */