/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

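/*
 * Example usage (illustrative values): the options above are parsed from
 * the mount command line, e.g.
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,nr_inodes=64,uid=1000,gid=1000,mode=0770 none /mnt/huge
 *
 * "size" and "min_size" accept either a byte count (with an optional
 * K/M/G suffix) or a percentage of the huge page pool, e.g. size=50%.
 */
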
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

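/*
 * Worked example (assuming a 64-bit kernel with PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64): PGOFF_LOFFT_MAX is the top 13 bits,
 * 0xfff8000000000000.  A vm_pgoff with any of those bits set would,
 * once shifted left by PAGE_SHIFT, overflow the 63 value bits of a
 * signed loff_t, so hugetlbfs_file_mmap() below rejects such mappings.
 */
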
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a (l)off_t when converted to byte offset.
	 */
	if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
		return -EINVAL;

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & F_SEAL_WRITE) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page() due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

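/*
 * Example /proc/mounts line with default options and 2MB huge pages
 * (illustrative; the rw,relatime part comes from generic mount flags):
 *
 *   none /mnt/huge hugetlbfs rw,relatime,pagesize=2M 0 0
 *
 * uid, gid, mode, nr_inodes, size and min_size only appear when they
 * differ from their defaults.
 */
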
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage    = hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode    = hugetlbfs_alloc_inode,
	.destroy_inode  = hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

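/*
 * Worked example (illustrative): with 2MB huge pages (huge_page_shift ==
 * 21) and a pool of 512 pages, "size=50%" arrives here as size_opt == 50
 * and val_type == SIZE_PERCENT:
 *
 *   50 << 21 = 104857600;  * 512 = 53687091200;  / 100 = 536870912
 *   536870912 >> 21 = 256 huge pages
 *
 * A plain "size=1G" (SIZE_STD) simply becomes 1G >> 21 = 512 huge pages.
 */
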
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1;	/* No limit on size by default */
	config.nr_inodes = -1;	/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1;	/* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize a subpool if a maximum or minimum size is
	 * specified.  Any needed reservations (for the minimum size) are
	 * taken when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

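/*
 * hugetlbfs has no backing device, so the superblock is created with
 * mount_nodev().
 */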
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

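/* One internal mount per supported huge page size, indexed by hstate. */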
static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

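/*
 * SHM_HUGETLB segments may be created without mlock accounting only by
 * tasks with CAP_IPC_LOCK or membership in the sysctl_hugetlb_shm_group
 * group.
 */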
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

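/*
 * Map a page size (log2, or 0 for the default huge page size) to an index
 * into the hstates[] array; returns -1 if the size is not supported.
 */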
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

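/* Give the unhashed dentries created below a "/name (deleted)" d_path name. */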
static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size on the caller
 * side, otherwise hugetlb_reserve_pages() reserves one fewer huge page than
 * intended.
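 *
 * The file created here lives on one of the internal per-hstate hugetlbfs
 * mounts and is never visible in a mounted filesystem; callers such as the
 * SHM_HUGETLB and MAP_HUGETLB paths use it as the backing object for huge
 * page mappings.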
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

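	/*
	 * SHM_HUGETLB callers without CAP_IPC_LOCK (and outside the hugetlb
	 * shm group) must charge the segment against their mlock rlimit;
	 * this accounting path is deprecated.
	 */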
	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

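/*
 * Module init: create the hugetlbfs inode cache, register the filesystem
 * and set up the internal vfsmounts used by hugetlb_file_setup().
 */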
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

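	/* Mount one internal instance for each supported huge page size. */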
	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK\n",
				ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)