| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_HUGETLB_H | 
|  | 2 | #define _LINUX_HUGETLB_H | 
|  | 3 |  | 
| Linus Torvalds | be93d8c | 2011-05-26 12:03:50 -0700 | [diff] [blame] | 4 | #include <linux/mm_types.h> | 
| Alexey Dobriyan | 4e950f6 | 2007-07-30 02:36:13 +0400 | [diff] [blame] | 5 | #include <linux/fs.h> | 
| Naoya Horiguchi | 8edf344 | 2010-05-28 09:29:15 +0900 | [diff] [blame] | 6 | #include <linux/hugetlb_inline.h> | 
| Aneesh Kumar K.V | abb8206 | 2012-07-31 16:42:24 -0700 | [diff] [blame] | 7 | #include <linux/cgroup.h> | 
| Alexey Dobriyan | 4e950f6 | 2007-07-30 02:36:13 +0400 | [diff] [blame] | 8 |  | 
| Andrew Morton | e9ea0e2 | 2009-09-24 14:47:45 -0700 | [diff] [blame] | 9 | struct ctl_table; | 
|  | 10 | struct user_struct; | 
| Aneesh Kumar K.V | 24669e5 | 2012-07-31 16:42:03 -0700 | [diff] [blame] | 11 | struct mmu_gather; | 
| Andrew Morton | e9ea0e2 | 2009-09-24 14:47:45 -0700 | [diff] [blame] | 12 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #ifdef CONFIG_HUGETLB_PAGE | 
|  | 14 |  | 
|  | 15 | #include <linux/mempolicy.h> | 
| Adam Litke | 516dffd | 2007-03-01 15:46:08 -0800 | [diff] [blame] | 16 | #include <linux/shm.h> | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 17 | #include <asm/tlbflush.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 18 |  | 
/*
 * A pool limiting how many huge pages a set of hugetlbfs mounts/files
 * may consume; created by hugepage_new_subpool() and released via
 * hugepage_put_subpool().
 */
struct hugepage_subpool {
	spinlock_t lock;		/* protects the counters below */
	long count;			/* presumably a usage/ref count dropped by
					 * hugepage_put_subpool() — verify in hugetlb.c */
	long max_hpages, used_hpages;	/* pool limit and current usage */
};
|  | 24 |  | 
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;

/* Iterate over every registered huge page size (hstate). */
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
|  | 29 |  | 
| David Gibson | 9048162 | 2012-03-21 16:34:12 -0700 | [diff] [blame] | 30 | struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); | 
|  | 31 | void hugepage_put_subpool(struct hugepage_subpool *spool); | 
|  | 32 |  | 
| Wu Fengguang | 20a0307 | 2009-06-16 15:32:22 -0700 | [diff] [blame] | 33 | int PageHuge(struct page *page); | 
|  | 34 |  | 
| Mel Gorman | a1e7877 | 2008-07-23 21:27:23 -0700 | [diff] [blame] | 35 | void reset_vma_resv_huge_pages(struct vm_area_struct *vma); | 
| Alexey Dobriyan | 8d65af7 | 2009-09-23 15:57:19 -0700 | [diff] [blame] | 36 | int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 
|  | 37 | int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 
|  | 38 | int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 
| Lee Schermerhorn | 06808b0 | 2009-12-14 17:58:21 -0800 | [diff] [blame] | 39 |  | 
|  | 40 | #ifdef CONFIG_NUMA | 
|  | 41 | int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, | 
|  | 42 | void __user *, size_t *, loff_t *); | 
|  | 43 | #endif | 
|  | 44 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); | 
| Michel Lespinasse | 28a3571 | 2013-02-22 16:35:55 -0800 | [diff] [blame] | 46 | long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, | 
|  | 47 | struct page **, struct vm_area_struct **, | 
|  | 48 | unsigned long *, unsigned long *, long, unsigned int); | 
| Mel Gorman | 04f2cbe | 2008-07-23 21:27:25 -0700 | [diff] [blame] | 49 | void unmap_hugepage_range(struct vm_area_struct *, | 
| Aneesh Kumar K.V | 24669e5 | 2012-07-31 16:42:03 -0700 | [diff] [blame] | 50 | unsigned long, unsigned long, struct page *); | 
| Mel Gorman | d833352 | 2012-07-31 16:46:20 -0700 | [diff] [blame] | 51 | void __unmap_hugepage_range_final(struct mmu_gather *tlb, | 
|  | 52 | struct vm_area_struct *vma, | 
|  | 53 | unsigned long start, unsigned long end, | 
|  | 54 | struct page *ref_page); | 
| Aneesh Kumar K.V | 24669e5 | 2012-07-31 16:42:03 -0700 | [diff] [blame] | 55 | void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, | 
|  | 56 | unsigned long start, unsigned long end, | 
|  | 57 | struct page *ref_page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 58 | int hugetlb_prefault(struct address_space *, struct vm_area_struct *); | 
| Alexey Dobriyan | e1759c2 | 2008-10-15 23:50:22 +0400 | [diff] [blame] | 59 | void hugetlb_report_meminfo(struct seq_file *); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 60 | int hugetlb_report_node_meminfo(int, char *); | 
| David Rientjes | 949f7ec | 2013-04-29 15:07:48 -0700 | [diff] [blame] | 61 | void hugetlb_show_meminfo(void); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 62 | unsigned long hugetlb_total_pages(void); | 
| Hugh Dickins | ac9b9c6 | 2005-10-20 16:24:28 +0100 | [diff] [blame] | 63 | int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | 
| Hugh Dickins | 788c7df | 2009-06-23 13:49:05 +0100 | [diff] [blame] | 64 | unsigned long address, unsigned int flags); | 
| Mel Gorman | a1e7877 | 2008-07-23 21:27:23 -0700 | [diff] [blame] | 65 | int hugetlb_reserve_pages(struct inode *inode, long from, long to, | 
| Mel Gorman | 5a6fe12 | 2009-02-10 14:02:27 +0000 | [diff] [blame] | 66 | struct vm_area_struct *vma, | 
| KOSAKI Motohiro | ca16d14 | 2011-05-26 19:16:19 +0900 | [diff] [blame] | 67 | vm_flags_t vm_flags); | 
| Chen, Kenneth W | a43a8c3 | 2006-06-23 02:03:15 -0700 | [diff] [blame] | 68 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); | 
| Naoya Horiguchi | 6de2b1a | 2010-09-08 10:19:36 +0900 | [diff] [blame] | 69 | int dequeue_hwpoisoned_huge_page(struct page *page); | 
| Naoya Horiguchi | 0ebabb4 | 2010-09-08 10:19:34 +0900 | [diff] [blame] | 70 | void copy_huge_page(struct page *dst, struct page *src); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 |  | 
| Mel Gorman | 396faf0 | 2007-07-17 04:03:13 -0700 | [diff] [blame] | 72 | extern unsigned long hugepages_treat_as_movable; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 73 | extern const unsigned long hugetlb_zero, hugetlb_infinity; | 
|  | 74 | extern int sysctl_hugetlb_shm_group; | 
| Jon Tollefson | 53ba51d | 2008-07-23 21:27:52 -0700 | [diff] [blame] | 75 | extern struct list_head huge_boot_pages; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 76 |  | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 77 | /* arch callbacks */ | 
|  | 78 |  | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 79 | pte_t *huge_pte_alloc(struct mm_struct *mm, | 
|  | 80 | unsigned long addr, unsigned long sz); | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 81 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr); | 
| Chen, Kenneth W | 39dde65 | 2006-12-06 20:32:03 -0800 | [diff] [blame] | 82 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 83 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | 
|  | 84 | int write); | 
|  | 85 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | 
|  | 86 | pmd_t *pmd, int write); | 
| Andi Kleen | ceb8687 | 2008-07-23 21:27:50 -0700 | [diff] [blame] | 87 | struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, | 
|  | 88 | pud_t *pud, int write); | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 89 | int pmd_huge(pmd_t pmd); | 
| Andi Kleen | ceb8687 | 2008-07-23 21:27:50 -0700 | [diff] [blame] | 90 | int pud_huge(pud_t pmd); | 
| Peter Zijlstra | 7da4d64 | 2012-11-19 03:14:23 +0100 | [diff] [blame] | 91 | unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | 
| Zhang, Yanmin | 8f86059 | 2006-03-22 00:08:50 -0800 | [diff] [blame] | 92 | unsigned long address, unsigned long end, pgprot_t newprot); | 
| David Gibson | 63551ae | 2005-06-21 17:14:44 -0700 | [diff] [blame] | 93 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 94 | #else /* !CONFIG_HUGETLB_PAGE */ | 
|  | 95 |  | 
/* !CONFIG_HUGETLB_PAGE: no page can ever be a huge page. */
static inline int PageHuge(struct page *page)
{
	return 0;
}
|  | 100 |  | 
/* !CONFIG_HUGETLB_PAGE: no hugetlb reservations to reset. */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}
|  | 104 |  | 
/* !CONFIG_HUGETLB_PAGE: no huge pages exist. */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}
|  | 109 |  | 
/*
 * !CONFIG_HUGETLB_PAGE stubs: these paths must be unreachable when
 * hugetlb support is compiled out, so most of them BUG() if invoked.
 */
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
/* !CONFIG_HUGETLB_PAGE: nothing to report in /proc/meminfo. */
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 117 | #define hugetlb_report_node_meminfo(n, buf)	0 | 
/* !CONFIG_HUGETLB_PAGE: nothing to show. */
static inline void hugetlb_show_meminfo(void)
{
}
/* More !CONFIG_HUGETLB_PAGE stubs: benign no-op/failure values. */
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
/* !CONFIG_HUGETLB_PAGE: no poisoned huge pages to dequeue. */
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}
|  | 134 |  | 
/* !CONFIG_HUGETLB_PAGE: nothing to copy. */
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 138 |  | 
/* !CONFIG_HUGETLB_PAGE: no huge PTEs; report zero pages changed. */
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}
| Zhang, Yanmin | 8f86059 | 2006-03-22 00:08:50 -0800 | [diff] [blame] | 144 |  | 
/* Must never be reached without CONFIG_HUGETLB_PAGE. */
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
|  | 151 |  | 
/* Must never be reached without CONFIG_HUGETLB_PAGE. */
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
|  | 158 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 159 | #endif /* !CONFIG_HUGETLB_PAGE */ | 
|  | 160 |  | 
| Eric B Munson | 4e52780 | 2009-09-21 17:03:47 -0700 | [diff] [blame] | 161 | #define HUGETLB_ANON_FILE "anon_hugepage" | 
|  | 162 |  | 
/* Flags describing how a hugetlbfs inode was created (accounting rules). */
enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};
|  | 175 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | #ifdef CONFIG_HUGETLBFS | 
/* Per-superblock private data for a hugetlbfs mount. */
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;	/* presumably guards the inode counters
					 * above — confirm against inode.c */
	struct hstate *hstate;		/* huge page size backing this mount */
	struct hugepage_subpool *spool;	/* page accounting pool for this mount */
};
|  | 184 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) | 
|  | 186 | { | 
|  | 187 | return sb->s_fs_info; | 
|  | 188 | } | 
|  | 189 |  | 
| Arjan van de Ven | 4b6f5d2 | 2006-03-28 01:56:42 -0800 | [diff] [blame] | 190 | extern const struct file_operations hugetlbfs_file_operations; | 
| Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 191 | extern const struct vm_operations_struct hugetlb_vm_ops; | 
| Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 192 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, | 
| Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 193 | struct user_struct **user, int creat_flags, | 
|  | 194 | int page_size_log); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 195 |  | 
|  | 196 | static inline int is_file_hugepages(struct file *file) | 
|  | 197 | { | 
| Adam Litke | 516dffd | 2007-03-01 15:46:08 -0800 | [diff] [blame] | 198 | if (file->f_op == &hugetlbfs_file_operations) | 
|  | 199 | return 1; | 
|  | 200 | if (is_file_shm_hugepages(file)) | 
|  | 201 | return 1; | 
|  | 202 |  | 
|  | 203 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 204 | } | 
|  | 205 |  | 
| Andi Kleen | 42d7395 | 2012-12-11 16:01:34 -0800 | [diff] [blame] | 206 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 207 | #else /* !CONFIG_HUGETLBFS */ | 
|  | 208 |  | 
| Stefan Richter | 1db8508 | 2009-02-10 23:27:32 +0100 | [diff] [blame] | 209 | #define is_file_hugepages(file)			0 | 
/* !CONFIG_HUGETLBFS: creating a hugetlb file always fails. */
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 217 |  | 
|  | 218 | #endif /* !CONFIG_HUGETLBFS */ | 
|  | 219 |  | 
| Adrian Bunk | d2ba27e8 | 2007-05-06 14:49:00 -0700 | [diff] [blame] | 220 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | 
|  | 221 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | 
|  | 222 | unsigned long len, unsigned long pgoff, | 
|  | 223 | unsigned long flags); | 
|  | 224 | #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ | 
|  | 225 |  | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 226 | #ifdef CONFIG_HUGETLB_PAGE | 
|  | 227 |  | 
| Nishanth Aravamudan | a343787 | 2008-07-23 21:27:44 -0700 | [diff] [blame] | 228 | #define HSTATE_NAME_LEN 32 | 
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;		/* NUMA node to try next on alloc */
	int next_nid_to_free;		/* NUMA node to try next on free */
	unsigned int order;		/* page size is PAGE_SIZE << order */
	unsigned long mask;		/* address mask for this page size */
	unsigned long max_huge_pages;	/* configured pool size target */
	unsigned long nr_huge_pages;	/* total pages in this hstate */
	unsigned long free_huge_pages;	/* pages currently unallocated */
	unsigned long resv_huge_pages;	/* pages reserved for mappings */
	unsigned long surplus_huge_pages;	/* pages above max_huge_pages */
	unsigned long nr_overcommit_huge_pages;	/* overcommit limit */
	struct list_head hugepage_activelist;	/* pages handed out */
	struct list_head hugepage_freelists[MAX_NUMNODES];	/* per-node free lists */
	unsigned int nr_huge_pages_node[MAX_NUMNODES];		/* per-node totals */
	unsigned int free_huge_pages_node[MAX_NUMNODES];	/* per-node free counts */
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];	/* per-node surplus */
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];	/* human-readable size name */
};
|  | 252 |  | 
/* A huge page handed out from bootmem, tracked until fully initialized. */
struct huge_bootmem_page {
	struct list_head list;	/* linked on huge_boot_pages */
	struct hstate *hstate;	/* page size this page belongs to */
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;	/* physical address (highmem may lack a
				 * permanent kernel mapping) */
#endif
};
|  | 260 |  | 
| Naoya Horiguchi | bf50bab | 2010-09-08 10:19:33 +0900 | [diff] [blame] | 261 | struct page *alloc_huge_page_node(struct hstate *h, int nid); | 
|  | 262 |  | 
| Jon Tollefson | 53ba51d | 2008-07-23 21:27:52 -0700 | [diff] [blame] | 263 | /* arch callback */ | 
|  | 264 | int __init alloc_bootmem_huge_page(struct hstate *h); | 
|  | 265 |  | 
| Andi Kleen | e5ff215 | 2008-07-23 21:27:42 -0700 | [diff] [blame] | 266 | void __init hugetlb_add_hstate(unsigned order); | 
|  | 267 | struct hstate *size_to_hstate(unsigned long size); | 
|  | 268 |  | 
|  | 269 | #ifndef HUGE_MAX_HSTATE | 
|  | 270 | #define HUGE_MAX_HSTATE 1 | 
|  | 271 | #endif | 
|  | 272 |  | 
|  | 273 | extern struct hstate hstates[HUGE_MAX_HSTATE]; | 
|  | 274 | extern unsigned int default_hstate_idx; | 
|  | 275 |  | 
|  | 276 | #define default_hstate (hstates[default_hstate_idx]) | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 277 |  | 
| Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 278 | static inline struct hstate *hstate_inode(struct inode *i) | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 279 | { | 
| Andi Kleen | a137e1c | 2008-07-23 21:27:43 -0700 | [diff] [blame] | 280 | struct hugetlbfs_sb_info *hsb; | 
|  | 281 | hsb = HUGETLBFS_SB(i->i_sb); | 
|  | 282 | return hsb->hstate; | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 283 | } | 
|  | 284 |  | 
/* Huge page size of the hugetlbfs file @f. */
static inline struct hstate *hstate_file(struct file *f)
{
	struct inode *inode = file_inode(f);

	return hstate_inode(inode);
}
|  | 289 |  | 
| Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 290 | static inline struct hstate *hstate_sizelog(int page_size_log) | 
|  | 291 | { | 
|  | 292 | if (!page_size_log) | 
|  | 293 | return &default_hstate; | 
|  | 294 | return size_to_hstate(1 << page_size_log); | 
|  | 295 | } | 
|  | 296 |  | 
/*
 * Huge page size backing @vma.  Assumes vma->vm_file is non-NULL
 * (hugetlb vmas are file-backed) — no NULL check is performed here.
 */
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
|  | 301 |  | 
/* Size in bytes of one huge page of this hstate. */
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}
|  | 306 |  | 
| Mel Gorman | 08fba69 | 2009-01-06 14:38:53 -0800 | [diff] [blame] | 307 | extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); | 
|  | 308 |  | 
| Mel Gorman | 3340289 | 2009-01-06 14:38:54 -0800 | [diff] [blame] | 309 | extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); | 
|  | 310 |  | 
/* Address mask for aligning to this huge page size. */
static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}
|  | 315 |  | 
/* Allocation order of this huge page size (size = PAGE_SIZE << order). */
static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}
|  | 320 |  | 
/* log2 of the huge page size in bytes. */
static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}
|  | 325 |  | 
/* Number of base (PAGE_SIZE) pages making up one huge page. */
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}
|  | 330 |  | 
/* Number of 512-byte (sector-sized) blocks per huge page. */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
|  | 335 |  | 
|  | 336 | #include <asm/hugetlb.h> | 
|  | 337 |  | 
#ifndef arch_make_huge_pte
/*
 * Default no-op.  Architectures that must adjust huge PTEs (for example
 * to encode the page size) override this in <asm/hugetlb.h>, included
 * just above.
 */
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif
|  | 345 |  | 
/* Map a compound page back to its hstate via the page's total size. */
static inline struct hstate *page_hstate(struct page *page)
{
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}
|  | 350 |  | 
/* Page shift (log2 bytes) of the hstate at @index in hstates[]. */
static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}
|  | 355 |  | 
/* Index of @h within the global hstates[] array. */
static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}
|  | 360 |  | 
pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	/* Head/ordinary pages already index in base pages; compound tail
	 * pages need the out-of-line helper to locate the head. */
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
|  | 371 |  | 
| Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 372 | #else	/* CONFIG_HUGETLB_PAGE */ | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 373 | struct hstate {}; | 
| Naoya Horiguchi | bf50bab | 2010-09-08 10:19:33 +0900 | [diff] [blame] | 374 | #define alloc_huge_page_node(h, nid) NULL | 
| Jon Tollefson | 53ba51d | 2008-07-23 21:27:52 -0700 | [diff] [blame] | 375 | #define alloc_bootmem_huge_page(h) NULL | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 376 | #define hstate_file(f) NULL | 
| Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 377 | #define hstate_sizelog(s) NULL | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 378 | #define hstate_vma(v) NULL | 
|  | 379 | #define hstate_inode(i) NULL | 
|  | 380 | #define huge_page_size(h) PAGE_SIZE | 
|  | 381 | #define huge_page_mask(h) PAGE_MASK | 
| Mel Gorman | 08fba69 | 2009-01-06 14:38:53 -0800 | [diff] [blame] | 382 | #define vma_kernel_pagesize(v) PAGE_SIZE | 
| Mel Gorman | 3340289 | 2009-01-06 14:38:54 -0800 | [diff] [blame] | 383 | #define vma_mmu_pagesize(v) PAGE_SIZE | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 384 | #define huge_page_order(h) 0 | 
|  | 385 | #define huge_page_shift(h) PAGE_SHIFT | 
| Andrea Righi | 510a35d | 2008-07-26 15:22:27 -0700 | [diff] [blame] | 386 | static inline unsigned int pages_per_huge_page(struct hstate *h) | 
|  | 387 | { | 
|  | 388 | return 1; | 
|  | 389 | } | 
| Andi Kleen | aa50d3a | 2010-10-06 21:45:00 +0200 | [diff] [blame] | 390 | #define hstate_index_to_shift(index) 0 | 
| Aneesh Kumar K.V | 972dc4d | 2012-07-31 16:42:00 -0700 | [diff] [blame] | 391 | #define hstate_index(h) 0 | 
| Zhang Yi | 13d60f4 | 2013-06-25 21:19:31 +0800 | [diff] [blame^] | 392 |  | 
/* !CONFIG_HUGETLB_PAGE: every page is a base page, index is direct. */
static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
| Naoya Horiguchi | af73e4d | 2013-05-07 16:18:13 -0700 | [diff] [blame] | 397 | #endif	/* CONFIG_HUGETLB_PAGE */ | 
| Andi Kleen | a551643 | 2008-07-23 21:27:41 -0700 | [diff] [blame] | 398 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 399 | #endif /* _LINUX_HUGETLB_H */ |