/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as a (l)off_t.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
		return -EINVAL;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
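
/*
 * Example (editor's sketch, not part of the kernel source): how userspace
 * typically reaches hugetlbfs_file_mmap().  The file offset must be
 * huge-page aligned (checked above); the length must also be a multiple
 * of the huge page size (checked in hugetlb_get_unmapped_area() below).
 * A 2 MB default huge page size is assumed here:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0755);
 *	size_t len = 4UL << 21;				// four 2 MB pages
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);		// extends i_size to len
 */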

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
				+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages is not modified.  Page faults can race with hole punch;
 *	this is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out-of-memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage counts may
			 * need to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end  >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
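
/*
 * Example (editor's sketch, not part of the kernel source): userspace
 * reaches hugetlbfs_punch_hole() via fallocate(2).  The VFS requires
 * FALLOC_FL_KEEP_SIZE together with FALLOC_FL_PUNCH_HOLE; because the
 * hole is rounded inward above, punching less than one aligned huge
 * page is a no-op.  Assuming 2 MB huge pages:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  2UL << 21, 2UL << 21);	// frees page indexes 2 and 3
 */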

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * put_page() due to reference from alloc_huge_page()
		 * unlock_page because locked by add_to_page_cache()
		 */
		put_page(page);
		unlock_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
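
/*
 * Example (editor's sketch, not part of the kernel source): the default
 * preallocate path above lets an application reserve huge pages up front
 * and get -ENOMEM from fallocate(2) instead of a SIGBUS on first touch:
 *
 *	if (fallocate(fd, 0, 0, 8UL << 21))	// preallocate 8 x 2 MB pages
 *		perror("fallocate");
 */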

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;

	remove_huge_page(page);
	hugetlb_fix_reserve_counts(inode);
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
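
/*
 * Worked example (editor's note, not part of the kernel source), assuming
 * 2 MB huge pages (huge_page_shift == 21) and h->max_huge_pages == 1024:
 *
 *	"size=1G"  (SIZE_STD):     0x40000000 >> 21                = 512 pages
 *	"size=50%" (SIZE_PERCENT): ((50 << 21) * 1024 / 100) >> 21 = 512 pages
 */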

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
1223 if (config.max_hpages != -1 || config.min_hpages != -1) {
1224 sbinfo->spool = hugepage_new_subpool(config.hstate,
1225 config.max_hpages,
1226 config.min_hpages);
David Gibson90481622012-03-21 16:34:12 -07001227 if (!sbinfo->spool)
1228 goto out_free;
1229 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 sb->s_maxbytes = MAX_LFS_FILESIZE;
Andi Kleena137e1c2008-07-23 21:27:43 -07001231 sb->s_blocksize = huge_page_size(config.hstate);
1232 sb->s_blocksize_bits = huge_page_shift(config.hstate);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 sb->s_magic = HUGETLBFS_MAGIC;
1234 sb->s_op = &hugetlbfs_ops;
1235 sb->s_time_gran = 1;
Al Viro48fde702012-01-08 22:15:13 -05001236 sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
1237 if (!sb->s_root)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 goto out_free;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 return 0;
1240out_free:
Fabian Frederick6e6870d2014-06-04 16:10:40 -07001241 kfree(sbinfo->spool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 kfree(sbinfo);
1243 return -ENOMEM;
1244}
1245
Al Viro3c26ff62010-07-25 11:46:36 +04001246static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
1247 int flags, const char *dev_name, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248{
Al Viro3c26ff62010-07-25 11:46:36 +04001249 return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250}
1251
1252static struct file_system_type hugetlbfs_fs_type = {
1253 .name = "hugetlbfs",
Al Viro3c26ff62010-07-25 11:46:36 +04001254 .mount = hugetlbfs_mount,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 .kill_sb = kill_litter_super,
1256};
1257
Andi Kleen42d73952012-12-11 16:01:34 -08001258static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259
From: Mel Gormanef1ff6b2009-09-23 15:56:05 -07001260static int can_do_hugetlb_shm(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Eric W. Biedermana0eb3a02012-02-07 16:19:25 -08001262 kgid_t shm_group;
1263 shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1264 return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265}
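
The sysctl_hugetlb_shm_group checked above is exposed as
/proc/sys/vm/hugetlb_shm_group, so a process without CAP_IPC_LOCK passes this
check once its group id has been written there. A minimal sketch, assuming a
hypothetical gid of 1001 and sufficient privilege to write the sysctl:

	#include <stdio.h>

	int main(void)
	{
		/* Grant the (hypothetical) gid 1001 the right to create
		 * SHM_HUGETLB segments without CAP_IPC_LOCK. */
		FILE *f = fopen("/proc/sys/vm/hugetlb_shm_group", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fprintf(f, "1001\n");
		return fclose(f) ? 1 : 0;
	}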
1266
Andi Kleen42d73952012-12-11 16:01:34 -08001267static int get_hstate_idx(int page_size_log)
1268{
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001269 struct hstate *h = hstate_sizelog(page_size_log);
Andi Kleen42d73952012-12-11 16:01:34 -08001270
Andi Kleen42d73952012-12-11 16:01:34 -08001271 if (!h)
1272 return -1;
1273 return h - hstates;
1274}
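
page_size_log is the log2 of the page size that callers encode in the high
bits of the mmap()/shmget() flags (MAP_HUGE_SHIFT, SHM_HUGE_SHIFT); zero
selects the default hstate. A hedged userspace sketch that requests the 2 MB
hstate explicitly (log2(2M) = 21); the 4 MB length is an arbitrary example:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	#ifndef MAP_HUGE_SHIFT
	#define MAP_HUGE_SHIFT 26
	#endif

	int main(void)
	{
		size_t len = 4UL << 20;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
			       (21 << MAP_HUGE_SHIFT), -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");	/* e.g. no 2 MB pages reserved */
			return 1;
		}
		munmap(p, len);
		return 0;
	}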
1275
Fabian Frederickbe1d2cf2014-06-04 16:10:39 -07001276static const struct dentry_operations anon_ops = {
Al Viro118b2302013-08-24 12:08:17 -04001277 .d_dname = simple_dname
Al Viro0df4d6e2013-02-14 22:39:53 -05001278};
1279
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001280/*
1281 * Note that size should be aligned to the proper hugepage size by the caller,
1282 * otherwise hugetlb_reserve_pages reserves one fewer hugepage than intended.
1283 */
1284struct file *hugetlb_file_setup(const char *name, size_t size,
1285 vm_flags_t acctflag, struct user_struct **user,
Andi Kleen42d73952012-12-11 16:01:34 -08001286 int creat_flags, int page_size_log)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287{
Anatol Pomozov39b65252012-09-12 20:11:55 -07001288 struct file *file = ERR_PTR(-ENOMEM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 struct inode *inode;
Al Viro2c48b9c2009-08-09 00:52:35 +04001290 struct path path;
Al Viro0df4d6e2013-02-14 22:39:53 -05001291 struct super_block *sb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292 struct qstr quick_string;
Andi Kleen42d73952012-12-11 16:01:34 -08001293 int hstate_idx;
1294
1295 hstate_idx = get_hstate_idx(page_size_log);
1296 if (hstate_idx < 0)
1297 return ERR_PTR(-ENODEV);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
Hugh Dickins353d5c32009-08-24 16:30:28 +01001299 *user = NULL;
Andi Kleen42d73952012-12-11 16:01:34 -08001300 if (!hugetlbfs_vfsmount[hstate_idx])
Akinobu Mita5bc98592007-05-06 14:50:18 -07001301 return ERR_PTR(-ENOENT);
1302
Mel Gormanef1ff6b2009-09-23 15:56:05 -07001303	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
Hugh Dickins353d5c32009-08-24 16:30:28 +01001304 *user = current_user();
1305 if (user_shm_lock(size, *user)) {
David Rientjes21a3c272012-03-21 16:34:13 -07001306 task_lock(current);
Andrew Morton9b857d22014-06-04 16:07:21 -07001307 pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
David Rientjes21a3c272012-03-21 16:34:13 -07001308 current->comm, current->pid);
1309 task_unlock(current);
Hugh Dickins353d5c32009-08-24 16:30:28 +01001310 } else {
1311 *user = NULL;
Ravikiran G Thirumalai2584e512009-03-31 15:21:26 -07001312 return ERR_PTR(-EPERM);
Hugh Dickins353d5c32009-08-24 16:30:28 +01001313 }
Ravikiran G Thirumalai2584e512009-03-31 15:21:26 -07001314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Al Viro0df4d6e2013-02-14 22:39:53 -05001316 sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
Eric W. Biederman9d665862007-06-16 10:16:16 -07001317 quick_string.name = name;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 quick_string.len = strlen(quick_string.name);
1319 quick_string.hash = 0;
Al Viro0df4d6e2013-02-14 22:39:53 -05001320 path.dentry = d_alloc_pseudo(sb, &quick_string);
Al Viro2c48b9c2009-08-09 00:52:35 +04001321 if (!path.dentry)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 goto out_shm_unlock;
1323
Al Viro0df4d6e2013-02-14 22:39:53 -05001324 d_set_d_op(path.dentry, &anon_ops);
Andi Kleen42d73952012-12-11 16:01:34 -08001325 path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
Anatol Pomozov39b65252012-09-12 20:11:55 -07001326 file = ERR_PTR(-ENOSPC);
Al Viro0df4d6e2013-02-14 22:39:53 -05001327 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 if (!inode)
Dave Hansence8d2cd2007-10-16 23:31:13 -07001329 goto out_dentry;
Stephen Smalleye1832f22015-08-06 15:46:55 -07001330 if (creat_flags == HUGETLB_SHMFS_INODE)
1331 inode->i_flags |= S_PRIVATE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332
Anatol Pomozov39b65252012-09-12 20:11:55 -07001333 file = ERR_PTR(-ENOMEM);
Naoya Horiguchiaf73e4d2013-05-07 16:18:13 -07001334 if (hugetlb_reserve_pages(inode, 0,
1335 size >> huge_page_shift(hstate_inode(inode)), NULL,
1336 acctflag))
David Gibsonb45b5bd2006-03-22 00:08:55 -08001337 goto out_inode;
1338
Al Viro2c48b9c2009-08-09 00:52:35 +04001339 d_instantiate(path.dentry, inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340 inode->i_size = size;
Miklos Szeredi6d6b77f2011-10-28 14:13:28 +02001341 clear_nlink(inode);
Dave Hansence8d2cd2007-10-16 23:31:13 -07001342
Al Viro2c48b9c2009-08-09 00:52:35 +04001343 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
Dave Hansence8d2cd2007-10-16 23:31:13 -07001344 &hugetlbfs_file_operations);
Anatol Pomozov39b65252012-09-12 20:11:55 -07001345 if (IS_ERR(file))
Al Virob4d232e2008-02-23 05:59:19 -05001346 goto out_dentry; /* inode is already attached */
Dave Hansence8d2cd2007-10-16 23:31:13 -07001347
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 return file;
1349
David Gibsonb45b5bd2006-03-22 00:08:55 -08001350out_inode:
1351 iput(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352out_dentry:
Al Viro2c48b9c2009-08-09 00:52:35 +04001353 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354out_shm_unlock:
Hugh Dickins353d5c32009-08-24 16:30:28 +01001355 if (*user) {
1356 user_shm_unlock(size, *user);
1357 *user = NULL;
1358 }
Anatol Pomozov39b65252012-09-12 20:11:55 -07001359 return file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360}
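
ipc/shm.c reaches hugetlb_file_setup() when SHM_HUGETLB is passed to
shmget(). A short userspace sketch; per the alignment note above, the segment
size must be a multiple of the huge page size, and the caller needs
CAP_IPC_LOCK or membership in vm.hugetlb_shm_group:

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	#ifndef SHM_HUGETLB
	#define SHM_HUGETLB 04000
	#endif

	int main(void)
	{
		/* 4 MB segment: two default 2 MB huge pages (assumed). */
		int id = shmget(IPC_PRIVATE, 4UL << 20,
				SHM_HUGETLB | IPC_CREAT | 0600);
		void *p;

		if (id < 0) {
			perror("shmget");
			return 1;
		}
		p = shmat(id, NULL, 0);
		if (p == (void *)-1) {
			perror("shmat");
			return 1;
		}
		shmdt(p);
		shmctl(id, IPC_RMID, NULL);
		return 0;
	}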
1361
1362static int __init init_hugetlbfs_fs(void)
1363{
Andi Kleen42d73952012-12-11 16:01:34 -08001364 struct hstate *h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 int error;
Andi Kleen42d73952012-12-11 16:01:34 -08001366 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001368 if (!hugepages_supported()) {
Andrew Morton9b857d22014-06-04 16:07:21 -07001369 pr_info("disabling because there are no supported hugepage sizes\n");
Nishanth Aravamudan457c1b22014-05-06 12:50:00 -07001370 return -ENOTSUPP;
1371 }
1372
Hillf Dantond1d5e052012-03-21 16:34:15 -07001373 error = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374 hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1375 sizeof(struct hugetlbfs_inode_info),
Vladimir Davydov5d097052016-01-14 15:18:21 -08001376 0, SLAB_ACCOUNT, init_once);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377 if (hugetlbfs_inode_cachep == NULL)
Peter Zijlstrae0bf68d2007-10-16 23:25:46 -07001378 goto out2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
1380 error = register_filesystem(&hugetlbfs_fs_type);
1381 if (error)
1382 goto out;
1383
Andi Kleen42d73952012-12-11 16:01:34 -08001384 i = 0;
1385 for_each_hstate(h) {
1386 char buf[50];
1387 unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Andi Kleen42d73952012-12-11 16:01:34 -08001389 snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
1390 hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
1391 buf);
1392
1393 if (IS_ERR(hugetlbfs_vfsmount[i])) {
Andrew Morton9b857d22014-06-04 16:07:21 -07001394 pr_err("Cannot mount internal hugetlbfs for "
Andi Kleen42d73952012-12-11 16:01:34 -08001395 "page size %uK", ps_kb);
1396 error = PTR_ERR(hugetlbfs_vfsmount[i]);
1397 hugetlbfs_vfsmount[i] = NULL;
1398 }
1399 i++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400 }
Andi Kleen42d73952012-12-11 16:01:34 -08001401	/* Non-default hstates are optional */
1402 if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
1403 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404
1405 out:
Hillf Dantond1d5e052012-03-21 16:34:15 -07001406 kmem_cache_destroy(hugetlbfs_inode_cachep);
Peter Zijlstrae0bf68d2007-10-16 23:25:46 -07001407 out2:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 return error;
1409}
Paul Gortmaker3e89e1c2016-01-14 15:21:52 -08001410fs_initcall(init_hugetlbfs_fs)
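
Once the filesystem is registered, files created on any hugetlbfs mount are
backed by huge pages; write(2) is not supported, so data is accessed through
mmap(). A hedged end-to-end sketch, assuming the /mnt/huge mount from the
earlier example and a 2 MB default huge page size:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* one huge page, size assumed */
		int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
		void *p;

		if (fd < 0) {
			perror("open");
			return 1;
		}
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		memset(p, 0, len);	/* faults the huge page in */
		munmap(p, len);
		close(fd);
		unlink("/mnt/huge/example");
		return 0;
	}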