#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/fs.h>

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct ctl_table;

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_HUGETLB;
}

int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);

extern unsigned long max_huge_pages;
extern unsigned long sysctl_overcommit_huge_pages;
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len)	0
#endif

#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range	free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
#endif

#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif

#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte)	set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);
#endif

#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif

#else /* !CONFIG_HUGETLB_PAGE */

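/*
 * Hugetlb support is compiled out: provide stubs so that generic code
 * still builds.  The BUG()-ing variants should never be reached, since
 * is_vm_hugetlb_page() always returns 0 in this configuration.
 */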
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end)	BUG()
#define hugetlb_report_meminfo(buf)		0
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define prepare_hugepage_range(addr,len)	(-EINVAL)
#define pmd_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })

#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK	PAGE_MASK		/* Keep the compiler happy */
#define HPAGE_SIZE	PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLBFS
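/* Mount options parsed for a hugetlbfs mount. */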
struct hugetlbfs_config {
	uid_t   uid;
	gid_t   gid;
	umode_t mode;
	long	nr_blocks;
	long	nr_inodes;
};

struct hugetlbfs_sb_info {
	long	max_blocks;   /* blocks allowed */
	long	free_blocks;  /* blocks free */
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
};


struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

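/*
 * hugetlb_file_setup() creates an unlinked file on the kernel-internal
 * hugetlbfs mount (used, for example, by SysV shared memory created with
 * SHM_HUGETLB).  The quota helpers charge and uncharge huge page blocks
 * against the per-superblock limit set by the "size=" mount option.
 */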
extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);

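/* Number of 512-byte blocks per huge page, used for i_blocks accounting. */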
#define BLOCKS_PER_HUGEPAGE	(HPAGE_SIZE / 512)

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		0
#define set_file_hugepages(file)	BUG()
#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#endif /* _LINUX_HUGETLB_H */