#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/fs.h>

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct ctl_table;

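/*
 * Returns non-zero when @vma is backed by huge pages (VM_HUGETLB is set),
 * zero otherwise.
 */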
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_HUGETLB;
}

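/*
 * Core hugetlb entry points, implemented in mm/hugetlb.c.  The two sysctl
 * handlers back the hugepage tunables under /proc/sys/vm.
 */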
int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);

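/*
 * Global hugetlb pool state.  Most of these are exposed as sysctls under
 * /proc/sys/vm (e.g. max_huge_pages via nr_hugepages).
 */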
extern unsigned long max_huge_pages;
extern unsigned long hugepages_treat_as_movable;
extern int hugetlb_dynamic_pool;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

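/*
 * Architectures may override the helpers guarded by the ARCH_HAS_* symbols
 * below; otherwise the generic fallbacks defined here are used.
 */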
#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len)	0
#endif

#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range	free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
#endif

#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
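/*
 * Example (assuming an arch with 2MB huge pages, i.e. HPAGE_SIZE == 0x200000):
 * addr == 0x40000000 with len == 0x400000 passes the checks below, while
 * len == 0x300000 is rejected with -EINVAL since it is not a multiple of
 * the huge page size.
 */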
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len);
#endif

#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte)	set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);
#endif

#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif

#else /* !CONFIG_HUGETLB_PAGE */

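/*
 * CONFIG_HUGETLB_PAGE is not set: provide stubs so callers need no #ifdefs.
 * Predicates report "no huge pages"; paths that should be unreachable BUG().
 */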
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end)	BUG()
#define hugetlb_report_meminfo(buf)		0
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define prepare_hugepage_range(addr,len)	(-EINVAL)
#define pmd_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })

#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK	PAGE_MASK	/* Keep the compiler happy */
#define HPAGE_SIZE	PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLBFS
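/* Mount options for a hugetlbfs instance: ownership, mode and resource limits. */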
struct hugetlbfs_config {
	uid_t	uid;
	gid_t	gid;
	umode_t	mode;
	long	nr_blocks;
	long	nr_inodes;
};

struct hugetlbfs_sb_info {
	long	max_blocks;	/* blocks allowed */
	long	free_blocks;	/* blocks free */
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
};

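/*
 * Per-inode data: the NUMA shared allocation policy plus the embedded VFS
 * inode that HUGETLBFS_I() maps back from via container_of().
 */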
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t);
int hugetlb_get_quota(struct address_space *mapping, long delta);
void hugetlb_put_quota(struct address_space *mapping, long delta);

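/* i_blocks is accounted in 512-byte units, so one huge page spans this many. */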
#define BLOCKS_PER_HUGEPAGE	(HPAGE_SIZE / 512)

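/*
 * True for files on hugetlbfs proper and for SysV shared memory segments
 * created with SHM_HUGETLB (which are backed by an unlinked hugetlbfs file).
 */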
static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		0
#define set_file_hugepages(file)	BUG()
#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */

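/*
 * Architectures that define HAVE_ARCH_HUGETLB_UNMAPPED_AREA supply their own
 * get_unmapped_area variant that returns ranges suitable for huge pages.
 */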
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#endif /* _LINUX_HUGETLB_H */