blob: ed550d819044da8c3dc5ca0b445ec85a7ba30374 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _LINUX_HUGETLB_H
2#define _LINUX_HUGETLB_H
3
Linus Torvaldsbe93d8c2011-05-26 12:03:50 -07004#include <linux/mm_types.h>
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04005#include <linux/fs.h>
Naoya Horiguchi8edf3442010-05-28 09:29:15 +09006#include <linux/hugetlb_inline.h>
Alexey Dobriyan4e950f62007-07-30 02:36:13 +04007
Andrew Mortone9ea0e22009-09-24 14:47:45 -07008struct ctl_table;
9struct user_struct;
Aneesh Kumar K.V24669e52012-07-31 16:42:03 -070010struct mmu_gather;
Andrew Mortone9ea0e22009-09-24 14:47:45 -070011
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#ifdef CONFIG_HUGETLB_PAGE
13
14#include <linux/mempolicy.h>
Adam Litke516dffd2007-03-01 15:46:08 -080015#include <linux/shm.h>
David Gibson63551ae2005-06-21 17:14:44 -070016#include <asm/tlbflush.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
/*
 * Per-mount huge page accounting.  A subpool caps how many huge pages a
 * hugetlbfs mount may consume and tracks current usage.
 */
struct hugepage_subpool {
	spinlock_t lock;		/* protects the counters below */
	long count;			/* reference count on the subpool itself */
	long max_hpages, used_hpages;	/* cap and current usage, in huge pages
					 * NOTE(review): a negative max likely
					 * means "no limit" -- confirm in mm/hugetlb.c */
};

/* Allocate a subpool limited to nr_blocks huge pages; release with _put. */
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
26
/* Non-zero when @page belongs to a hugetlb page. */
int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);

/* sysctl handlers for the /proc/sys/vm hugepage knobs */
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

/* fork()-time copy of a hugetlb mapping's page tables */
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			struct page **, struct vm_area_struct **,
			unsigned long *, int *, int, unsigned int flags);
void unmap_hugepage_range(struct vm_area_struct *,
			unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
/* reserve/unreserve huge pages for a file range (units: huge pages) */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
David Gibson63551ae2005-06-21 17:14:44 -070066/* arch callbacks */
67
Andi Kleena5516432008-07-23 21:27:41 -070068pte_t *huge_pte_alloc(struct mm_struct *mm,
69 unsigned long addr, unsigned long sz);
David Gibson63551ae2005-06-21 17:14:44 -070070pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
Chen, Kenneth W39dde652006-12-06 20:32:03 -080071int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
David Gibson63551ae2005-06-21 17:14:44 -070072struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
73 int write);
74struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
75 pmd_t *pmd, int write);
Andi Kleenceb86872008-07-23 21:27:50 -070076struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
77 pud_t *pud, int write);
David Gibson63551ae2005-06-21 17:14:44 -070078int pmd_huge(pmd_t pmd);
Andi Kleenceb86872008-07-23 21:27:50 -070079int pud_huge(pud_t pmd);
Zhang, Yanmin8f860592006-03-22 00:08:50 -080080void hugetlb_change_protection(struct vm_area_struct *vma,
81 unsigned long address, unsigned long end, pgprot_t newprot);
David Gibson63551ae2005-06-21 17:14:44 -070082
Linus Torvalds1da177e2005-04-16 15:20:36 -070083#else /* !CONFIG_HUGETLB_PAGE */
84
/* CONFIG_HUGETLB_PAGE disabled: report "no huge pages anywhere". */
static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}
98
/*
 * With CONFIG_HUGETLB_PAGE disabled these paths must be unreachable;
 * the BUG() stubs assert that, the rest return benign "nothing here"
 * values.
 */
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
/* NOTE(review): 0 stands in for a null pte_t *; NULL would be clearer */
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline void copy_huge_page(struct page *dst, struct page *src)
{
}

/* expands to nothing: no huge page protections exist to change */
#define hugetlb_change_protection(vma, address, end, newprot)

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	/* must never be reached when hugetlb support is compiled out */
	BUG();
}
133
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134#endif /* !CONFIG_HUGETLB_PAGE */
135
/* name given to anonymous huge page files on the internal hugetlbfs mount */
#define HUGETLB_ANON_FILE	"anon_hugepage"

/* creat_flags values passed to hugetlb_file_setup() */
enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};
150
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151#ifdef CONFIG_HUGETLBFS
/* Per-superblock (per-mount) hugetlbfs state. */
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;	/* protects the inode counters */
	struct hstate *hstate;		/* huge page size backing this mount */
	struct hugepage_subpool *spool;	/* huge page accounting for this mount */
};
159
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
161{
162 return sb->s_fs_info;
163}
164
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
/* Create an unlinked hugetlbfs file of @size bytes on the internal mount. */
struct file *hugetlb_file_setup(const char *name, unsigned long addr,
				size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170
171static inline int is_file_hugepages(struct file *file)
172{
Adam Litke516dffd2007-03-01 15:46:08 -0800173 if (file->f_op == &hugetlbfs_file_operations)
174 return 1;
175 if (is_file_shm_hugepages(file))
176 return 1;
177
178 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179}
180
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181#else /* !CONFIG_HUGETLBFS */
182
/* CONFIG_HUGETLBFS disabled: no file can be hugetlbfs backed. */
#define is_file_hugepages(file)			0

static inline struct file *
hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
		vm_flags_t acctflag, struct user_struct **user, int creat_flags)
{
	return ERR_PTR(-ENOSYS);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190
191#endif /* !CONFIG_HUGETLBFS */
192
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
/* arch-provided search for a free address range suitable for huge pages */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
198
Andi Kleena5516432008-07-23 21:27:41 -0700199#ifdef CONFIG_HUGETLB_PAGE
200
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;	/* round-robin cursors over NUMA nodes */
	int next_nid_to_free;
	unsigned int order;	/* huge page size is PAGE_SIZE << order */
	unsigned long mask;	/* mask selecting the huge-page-aligned bits */
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];	/* per-node free lists */
	unsigned int nr_huge_pages_node[MAX_NUMNODES];		/* per-node counters */
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
	char name[HSTATE_NAME_LEN];	/* human-readable size name */
};
221
/* A huge page handed out by the boot-time allocator, queued for later setup. */
struct huge_bootmem_page {
	struct list_head list;		/* linked on huge_boot_pages */
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};
229
/* Allocate a huge page of size @h, preferring NUMA node @nid. */
struct page *alloc_huge_page_node(struct hstate *h, int nid);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

/* Register a huge page size of PAGE_SIZE << order at boot. */
void __init hugetlb_add_hstate(unsigned order);
/* Find the hstate whose huge page size equals @size, if any. */
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
Andi Kleena5516432008-07-23 21:27:41 -0700246
Andi Kleena137e1c2008-07-23 21:27:43 -0700247static inline struct hstate *hstate_inode(struct inode *i)
Andi Kleena5516432008-07-23 21:27:41 -0700248{
Andi Kleena137e1c2008-07-23 21:27:43 -0700249 struct hugetlbfs_sb_info *hsb;
250 hsb = HUGETLBFS_SB(i->i_sb);
251 return hsb->hstate;
Andi Kleena5516432008-07-23 21:27:41 -0700252}
253
/* hstate for the hugetlbfs file backing @f */
static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(f->f_dentry->d_inode);
}

/* hstate for @vma's backing file.  NOTE(review): assumes vma->vm_file != NULL */
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
263
/* huge page size in bytes */
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

/* mask selecting the huge-page-aligned part of an address */
static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

/* log2 of the huge page size */
static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

/* number of base pages per huge page */
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

/* number of 512-byte sectors per huge page */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
297
298#include <asm/hugetlb.h>
299
#ifndef arch_make_huge_pte
/*
 * Default: return the entry unchanged.  Architectures override this
 * (before including this header) to adjust the huge PTE encoding.
 */
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif
307
/* hstate matching @page's compound size */
static inline struct hstate *page_hstate(struct page *page)
{
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

/* page shift of the hstate at @index in hstates[] */
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

/* index of @h within the global hstates[] array */
static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}
322
#else
/* CONFIG_HUGETLB_PAGE disabled: empty hstate and base-page-size fallbacks. */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
#endif
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344#endif /* _LINUX_HUGETLB_H */