/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs with the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

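/*
 * i_blocks is kept in 512-byte units (BLOCKS_PER_PAGE per page), while
 * memory commitment is accounted in whole pages: VM_ACCT() rounds a byte
 * count up to a page count.
 */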
#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

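/*
 * shmem_getpage() is the simple wrapper for callers content with the
 * mapping's default gfp mask and no fault accounting; others call
 * shmem_getpage_gfp() directly.
 */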
int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

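/*
 * Reserve or release an inode against the superblock's nr_inodes limit
 * (sbinfo->max_inodes; zero means the mount is not limited).
 */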
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped,
 * so what the mm freed is
 * info->alloced - (inode->i_mapping->nrpages + info->swapped).
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

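/*
 * shmem_charge() and shmem_uncharge() adjust the inode's block accounting
 * for pages added or removed outside the normal allocation path (for
 * example, when a huge page is collapsed into, or split out of, the page
 * cache); shmem_charge() backs out and fails if the mount's block limit
 * would be exceeded.
 */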
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	if (shmem_acct_block(info->flags, pages))
		return false;
	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	if (!sbinfo->max_blocks)
		return true;
	if (percpu_counter_compare(&sbinfo->used_blocks,
				sbinfo->max_blocks - pages) > 0) {
		inode->i_mapping->nrpages -= pages;
		spin_lock_irqsave(&info->lock, flags);
		info->alloced -= pages;
		shmem_recalc_inode(inode);
		spin_unlock_irqrestore(&info->lock, flags);
		shmem_unacct_blocks(info->flags, pages);
		return false;
	}
	percpu_counter_add(&sbinfo->used_blocks, pages);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	struct radix_tree_node *node;
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
	if (!item)
		return -ENOENT;
	if (item != expected)
		return -ENOENT;
	__radix_tree_replace(&mapping->page_tree, node, pslot,
			     replacement, NULL, NULL);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

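/*
 * Shrink the parts of huge pages which extend beyond i_size: first gather
 * candidate inodes from sbinfo->shrinklist under the shrinklist lock, then
 * split their tail pages with that lock dropped.
 */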
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_del_init(&info->shrinklist);
			removed++;
			iput(inode);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
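	/*
	 * A huge page occupies HPAGE_PMD_NR consecutive slots: fail with
	 * -EEXIST if any of those slots is already occupied, otherwise
	 * insert the subpages one slot at a time.
	 */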
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
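/*
 * Done in two passes: the first uses trylock_page() and skips any page it
 * cannot lock immediately; the second sleeps on the page lock and, when
 * truncating, restarts until the whole range is really gone.
 */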
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial THP truncate, with 'start' in the
				 * middle of the THP: no need to look at these
				 * pages again on any !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
			 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
		spin_lock_irq(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock_irq(&info->lock);
	}
	generic_fillattr(inode, stat);
	return 0;
}

Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 959 | static int shmem_setattr(struct dentry *dentry, struct iattr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 960 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 961 | struct inode *inode = d_inode(dentry); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 962 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 963 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | int error; |
| 965 | |
Jan Kara | 31051c8 | 2016-05-26 16:55:18 +0200 | [diff] [blame] | 966 | error = setattr_prepare(dentry, attr); |
Christoph Hellwig | db78b87 | 2010-06-04 11:30:03 +0200 | [diff] [blame] | 967 | if (error) |
| 968 | return error; |
| 969 | |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 970 | if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { |
| 971 | loff_t oldsize = inode->i_size; |
| 972 | loff_t newsize = attr->ia_size; |
npiggin@suse.de | 3889e6e | 2010-05-27 01:05:36 +1000 | [diff] [blame] | 973 | |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 974 | /* protected by i_mutex */ |
| 975 | if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || |
| 976 | (newsize > oldsize && (info->seals & F_SEAL_GROW))) |
| 977 | return -EPERM; |
| 978 | |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 979 | if (newsize != oldsize) { |
Konstantin Khlebnikov | 7714251 | 2014-08-06 16:06:34 -0700 | [diff] [blame] | 980 | error = shmem_reacct_size(SHMEM_I(inode)->flags, |
| 981 | oldsize, newsize); |
| 982 | if (error) |
| 983 | return error; |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 984 | i_size_write(inode, newsize); |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 985 | inode->i_ctime = inode->i_mtime = current_time(inode); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 986 | } |
Josef Bacik | afa2db2 | 2015-06-24 16:58:45 -0700 | [diff] [blame] | 987 | if (newsize <= oldsize) { |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 988 | loff_t holebegin = round_up(newsize, PAGE_SIZE); |
Hugh Dickins | d0424c4 | 2015-11-05 18:50:34 -0800 | [diff] [blame] | 989 | if (oldsize > holebegin) |
| 990 | unmap_mapping_range(inode->i_mapping, |
| 991 | holebegin, 0, 1); |
| 992 | if (info->alloced) |
| 993 | shmem_truncate_range(inode, |
| 994 | newsize, (loff_t)-1); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 995 | /* unmap again to remove racily COWed private pages */ |
Hugh Dickins | d0424c4 | 2015-11-05 18:50:34 -0800 | [diff] [blame] | 996 | if (oldsize > holebegin) |
| 997 | unmap_mapping_range(inode->i_mapping, |
| 998 | holebegin, 0, 1); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 999 | |
| 1000 | /* |
| 1001 | * Part of the huge page can be beyond i_size: subject |
| 1002 | * to shrink under memory pressure. |
| 1003 | */ |
| 1004 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { |
| 1005 | spin_lock(&sbinfo->shrinklist_lock); |
| 1006 | if (list_empty(&info->shrinklist)) { |
| 1007 | list_add_tail(&info->shrinklist, |
| 1008 | &sbinfo->shrinklist); |
| 1009 | sbinfo->shrinklist_len++; |
| 1010 | } |
| 1011 | spin_unlock(&sbinfo->shrinklist_lock); |
| 1012 | } |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 1013 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | } |
| 1015 | |
Christoph Hellwig | db78b87 | 2010-06-04 11:30:03 +0200 | [diff] [blame] | 1016 | setattr_copy(inode, attr); |
Christoph Hellwig | db78b87 | 2010-06-04 11:30:03 +0200 | [diff] [blame] | 1017 | if (attr->ia_valid & ATTR_MODE) |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 1018 | error = posix_acl_chmod(inode, inode->i_mode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | return error; |
| 1020 | } |
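| | /* |
| |  * Illustrative sketch, not part of this file: what the seal checks in |
| |  * shmem_setattr() look like from userspace.  A memfd sealed with |
| |  * F_SEAL_SHRINK cannot be truncated smaller, so the second ftruncate() |
| |  * below is expected to fail with EPERM, matching the -EPERM above. |
| |  * Assumes a glibc new enough to expose memfd_create(). |
| |  */ |
| | #if 0 /* userspace example, not kernel code */ |
| | #define _GNU_SOURCE |
| | #include <sys/mman.h> |
| | #include <fcntl.h> |
| | #include <unistd.h> |
| | #include <errno.h> |
| | #include <assert.h> |
| |  |
| | int main(void) |
| | { |
| | 	int fd = memfd_create("sealed", MFD_ALLOW_SEALING); |
| |  |
| | 	assert(fd >= 0); |
| | 	assert(ftruncate(fd, 8192) == 0);	/* growing is still allowed */ |
| | 	assert(fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK) == 0); |
| | 	assert(ftruncate(fd, 4096) == -1 && errno == EPERM); |
| | 	return 0; |
| | } |
| | #endif |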
| 1021 | |
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 1022 | static void shmem_evict_inode(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1024 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1025 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1026 | |
npiggin@suse.de | 3889e6e | 2010-05-27 01:05:36 +1000 | [diff] [blame] | 1027 | if (inode->i_mapping->a_ops == &shmem_aops) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1028 | shmem_unacct_size(info->flags, inode->i_size); |
| 1029 | inode->i_size = 0; |
npiggin@suse.de | 3889e6e | 2010-05-27 01:05:36 +1000 | [diff] [blame] | 1030 | shmem_truncate_range(inode, 0, (loff_t)-1); |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1031 | if (!list_empty(&info->shrinklist)) { |
| 1032 | spin_lock(&sbinfo->shrinklist_lock); |
| 1033 | if (!list_empty(&info->shrinklist)) { |
| 1034 | list_del_init(&info->shrinklist); |
| 1035 | sbinfo->shrinklist_len--; |
| 1036 | } |
| 1037 | spin_unlock(&sbinfo->shrinklist_lock); |
| 1038 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | if (!list_empty(&info->swaplist)) { |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1040 | mutex_lock(&shmem_swaplist_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1041 | list_del_init(&info->swaplist); |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1042 | mutex_unlock(&shmem_swaplist_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | } |
Al Viro | 3ed47db | 2016-01-22 18:08:52 -0500 | [diff] [blame] | 1044 | } |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 1045 | |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 1046 | simple_xattrs_free(&info->xattrs); |
Hugh Dickins | 0f3c42f | 2012-11-16 14:15:04 -0800 | [diff] [blame] | 1047 | WARN_ON(inode->i_blocks); |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1048 | shmem_free_inode(inode->i_sb); |
Jan Kara | dbd5768 | 2012-05-03 14:48:02 +0200 | [diff] [blame] | 1049 | clear_inode(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1050 | } |
| 1051 | |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1052 | static unsigned long find_swap_entry(struct radix_tree_root *root, void *item) |
| 1053 | { |
| 1054 | struct radix_tree_iter iter; |
| 1055 | void **slot; |
| 1056 | unsigned long found = -1; |
| 1057 | unsigned int checked = 0; |
| 1058 | |
| 1059 | rcu_read_lock(); |
| 1060 | radix_tree_for_each_slot(slot, root, &iter, 0) { |
| 1061 | if (*slot == item) { |
| 1062 | found = iter.index; |
| 1063 | break; |
| 1064 | } |
| 1065 | checked++; |
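| | 		/* every 4096 slots, pause the RCU walk so we can reschedule */ |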
| 1066 | if ((checked % 4096) != 0) |
| 1067 | continue; |
| 1068 | slot = radix_tree_iter_resume(slot, &iter); |
| 1069 | cond_resched_rcu(); |
| 1070 | } |
| 1071 | |
| 1072 | rcu_read_unlock(); |
| 1073 | return found; |
| 1074 | } |
| 1075 | |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1076 | /* |
| 1077 | * If swap found in inode, free it and move page from swapcache to filecache. |
| 1078 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1079 | static int shmem_unuse_inode(struct shmem_inode_info *info, |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1080 | swp_entry_t swap, struct page **pagep) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1081 | { |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1082 | struct address_space *mapping = info->vfs_inode.i_mapping; |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1083 | void *radswap; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1084 | pgoff_t index; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1085 | gfp_t gfp; |
| 1086 | int error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1088 | radswap = swp_to_radix_entry(swap); |
Matthew Wilcox | 478922e | 2016-12-14 15:08:52 -0800 | [diff] [blame] | 1089 | index = find_swap_entry(&mapping->page_tree, radswap); |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1090 | if (index == -1) |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1091 | return -EAGAIN; /* tell shmem_unuse we found nothing */ |
Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1092 | |
Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1093 | /* |
| 1094 | * Move the list _head_ here, so the next search starts from this inode. |
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 1095 | * But be careful: shmem_evict_inode checks list_empty without taking |
Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1096 | * mutex, and there's an instant in list_move_tail when info->swaplist |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1097 | * would appear empty, if it were the only one on shmem_swaplist. |
Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1098 | */ |
| 1099 | if (shmem_swaplist.next != &info->swaplist) |
| 1100 | list_move_tail(&shmem_swaplist, &info->swaplist); |
Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1101 | |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1102 | gfp = mapping_gfp_mask(mapping); |
| 1103 | if (shmem_should_replace_page(*pagep, gfp)) { |
| 1104 | mutex_unlock(&shmem_swaplist_mutex); |
| 1105 | error = shmem_replace_page(pagep, gfp, info, index); |
| 1106 | mutex_lock(&shmem_swaplist_mutex); |
| 1107 | /* |
| 1108 | * We needed to drop mutex to make that restrictive page |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1109 | * allocation, but the inode might have been freed while we |
| 1110 | * dropped it: although a racing shmem_evict_inode() cannot |
| 1111 | * complete without emptying the radix_tree, our page lock |
| 1112 | * on this swapcache page is not enough to prevent that - |
| 1113 | * free_swap_and_cache() of our swap entry will only |
| 1114 | * trylock_page(), removing swap from radix_tree whatever. |
| 1115 | * |
| 1116 | * We must not proceed to shmem_add_to_page_cache() if the |
| 1117 | * inode has been freed, but of course we cannot rely on |
| 1118 | * inode or mapping or info to check that. However, we can |
| 1119 | * safely check if our swap entry is still in use (and here |
| 1120 | * it can't have got reused for another page): if it's still |
| 1121 | * in use, then the inode cannot have been freed yet, and we |
| 1122 | * can safely proceed (if it's no longer in use, that tells |
| 1123 | * nothing about the inode, but we don't need to unuse swap). |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1124 | */ |
| 1125 | if (!page_swapcount(*pagep)) |
| 1126 | error = -ENOENT; |
| 1127 | } |
| 1128 | |
KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 1129 | /* |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1130 | * We rely on shmem_swaplist_mutex, not only to protect the swaplist, |
| 1131 | * but also to hold up shmem_evict_inode(): so inode cannot be freed |
| 1132 | * beneath us (pagelock doesn't help until the page is in pagecache). |
KAMEZAWA Hiroyuki | d13d144 | 2009-01-07 18:07:56 -0800 | [diff] [blame] | 1133 | */ |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1134 | if (!error) |
| 1135 | error = shmem_add_to_page_cache(*pagep, mapping, index, |
Wang Sheng-Hui | fed400a | 2014-08-06 16:07:26 -0700 | [diff] [blame] | 1136 | radswap); |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1137 | if (error != -ENOMEM) { |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1138 | /* |
| 1139 | * Truncation and eviction use free_swap_and_cache(), which |
| 1140 | * only trylocks the page: if we raced, best clean up here. |
| 1141 | */ |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1142 | delete_from_swap_cache(*pagep); |
| 1143 | set_page_dirty(*pagep); |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1144 | if (!error) { |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1145 | spin_lock_irq(&info->lock); |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1146 | info->swapped--; |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1147 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1148 | swap_free(swap); |
| 1149 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | } |
Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1151 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | } |
| 1153 | |
| 1154 | /* |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1155 | * Search through swapped inodes to find the swap entry, and replace it with page. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1157 | int shmem_unuse(swp_entry_t swap, struct page *page) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1159 | struct list_head *this, *next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1160 | struct shmem_inode_info *info; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1161 | struct mem_cgroup *memcg; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1162 | int error = 0; |
| 1163 | |
| 1164 | /* |
| 1165 | * There's a faint possibility that swap page was replaced before |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1166 | * caller locked it: caller will come back later with the right page. |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1167 | */ |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1168 | if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1169 | goto out; |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1170 | |
| 1171 | /* |
| 1172 | * Charge page using GFP_KERNEL while we can wait, before taking |
| 1173 | * the shmem_swaplist_mutex which might hold up shmem_writepage(). |
| 1174 | * Charged back to the user (not to caller) when swap account is used. |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1175 | */ |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1176 | error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg, |
| 1177 | false); |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1178 | if (error) |
| 1179 | goto out; |
Hugh Dickins | 46f65ec | 2011-08-03 16:21:23 -0700 | [diff] [blame] | 1180 | /* No radix_tree_preload: swap entry keeps a place for page in tree */ |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1181 | error = -EAGAIN; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1183 | mutex_lock(&shmem_swaplist_mutex); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1184 | list_for_each_safe(this, next, &shmem_swaplist) { |
| 1185 | info = list_entry(this, struct shmem_inode_info, swaplist); |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 1186 | if (info->swapped) |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1187 | error = shmem_unuse_inode(info, swap, &page); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1188 | else |
| 1189 | list_del_init(&info->swaplist); |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1190 | cond_resched(); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1191 | if (error != -EAGAIN) |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1192 | break; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1193 | /* found nothing in this inode: move on to search the next */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | } |
Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1195 | mutex_unlock(&shmem_swaplist_mutex); |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1196 | |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1197 | if (error) { |
| 1198 | if (error != -ENOMEM) |
| 1199 | error = 0; |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1200 | mem_cgroup_cancel_charge(page, memcg, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1201 | } else |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1202 | mem_cgroup_commit_charge(page, memcg, true, false); |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1203 | out: |
Hugh Dickins | aaa4686 | 2009-12-14 17:58:47 -0800 | [diff] [blame] | 1204 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1205 | put_page(page); |
Hugh Dickins | 778dd89 | 2011-05-11 15:13:37 -0700 | [diff] [blame] | 1206 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | } |
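| | /* |
| |  * Note: shmem_unuse() is called from swapoff's try_to_unuse() for each |
| |  * swap page that might belong to tmpfs; swallowing every error except |
| |  * -ENOMEM above lets swapoff carry on with the remaining pages. |
| |  */ |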
| 1208 | |
| 1209 | /* |
| 1210 | * Move the page from the page cache to the swap cache. |
| 1211 | */ |
| 1212 | static int shmem_writepage(struct page *page, struct writeback_control *wbc) |
| 1213 | { |
| 1214 | struct shmem_inode_info *info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | struct address_space *mapping; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | struct inode *inode; |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1217 | swp_entry_t swap; |
| 1218 | pgoff_t index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1219 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1220 | VM_BUG_ON_PAGE(PageCompound(page), page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | BUG_ON(!PageLocked(page)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | mapping = page->mapping; |
| 1223 | index = page->index; |
| 1224 | inode = mapping->host; |
| 1225 | info = SHMEM_I(inode); |
| 1226 | if (info->flags & VM_LOCKED) |
| 1227 | goto redirty; |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1228 | if (!total_swap_pages) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | goto redirty; |
| 1230 | |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1231 | /* |
Christoph Hellwig | 97b713b | 2015-01-14 10:42:31 +0100 | [diff] [blame] | 1232 | * Our capabilities prevent regular writeback or sync from ever calling |
| 1233 | * shmem_writepage; but a stacking filesystem might use ->writepage of |
| 1234 | * its underlying filesystem, in which case tmpfs should write out to |
| 1235 | * swap only in response to memory pressure, and not for the writeback |
| 1236 | * threads or sync. |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1237 | */ |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1238 | if (!wbc->for_reclaim) { |
| 1239 | WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ |
| 1240 | goto redirty; |
| 1241 | } |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1242 | |
| 1243 | /* |
| 1244 | * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC |
| 1245 | * value into swapfile.c, the only way we can correctly account for a |
| 1246 | * fallocated page arriving here is now to initialize it and write it. |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1247 | * |
| 1248 | * That's okay for a page already fallocated earlier, but if we have |
| 1249 | * not yet completed the fallocation, then (a) we want to keep track |
| 1250 | * of this page in case we have to undo it, and (b) it may not be a |
| 1251 | * good idea to continue anyway, once we're pushing into swap. So |
| 1252 | * reactivate the page, and let shmem_fallocate() quit when too many. |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1253 | */ |
| 1254 | if (!PageUptodate(page)) { |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1255 | if (inode->i_private) { |
| 1256 | struct shmem_falloc *shmem_falloc; |
| 1257 | spin_lock(&inode->i_lock); |
| 1258 | shmem_falloc = inode->i_private; |
| 1259 | if (shmem_falloc && |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1260 | !shmem_falloc->waitq && |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1261 | index >= shmem_falloc->start && |
| 1262 | index < shmem_falloc->next) |
| 1263 | shmem_falloc->nr_unswapped++; |
| 1264 | else |
| 1265 | shmem_falloc = NULL; |
| 1266 | spin_unlock(&inode->i_lock); |
| 1267 | if (shmem_falloc) |
| 1268 | goto redirty; |
| 1269 | } |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1270 | clear_highpage(page); |
| 1271 | flush_dcache_page(page); |
| 1272 | SetPageUptodate(page); |
| 1273 | } |
| 1274 | |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1275 | swap = get_swap_page(); |
| 1276 | if (!swap.val) |
| 1277 | goto redirty; |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1278 | |
Vladimir Davydov | 37e8435 | 2016-01-20 15:02:56 -0800 | [diff] [blame] | 1279 | if (mem_cgroup_try_charge_swap(page, swap)) |
| 1280 | goto free_swap; |
| 1281 | |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1282 | /* |
| 1283 | * Add inode to shmem_unuse()'s list of swapped-out inodes, |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1284 | * if it's not already there. Do it now before the page is |
| 1285 | * moved to swap cache, when its pagelock no longer protects |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1286 | * the inode from eviction. But don't unlock the mutex until |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1287 | * we've incremented swapped, because shmem_unuse_inode() will |
| 1288 | * prune a !swapped inode from the swaplist under this mutex. |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1289 | */ |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1290 | mutex_lock(&shmem_swaplist_mutex); |
| 1291 | if (list_empty(&info->swaplist)) |
| 1292 | list_add_tail(&info->swaplist, &shmem_swaplist); |
Hugh Dickins | b1dea80 | 2011-05-11 15:13:36 -0700 | [diff] [blame] | 1293 | |
Hugh Dickins | 48f170f | 2011-07-25 17:12:37 -0700 | [diff] [blame] | 1294 | if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1295 | spin_lock_irq(&info->lock); |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1296 | shmem_recalc_inode(inode); |
| 1297 | info->swapped++; |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1298 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1299 | |
Hugh Dickins | aaa4686 | 2009-12-14 17:58:47 -0800 | [diff] [blame] | 1300 | swap_shmem_alloc(swap); |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1301 | shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); |
| 1302 | |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1303 | mutex_unlock(&shmem_swaplist_mutex); |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1304 | BUG_ON(page_mapped(page)); |
Hugh Dickins | 9fab561 | 2009-03-31 15:23:33 -0700 | [diff] [blame] | 1305 | swap_writepage(page, wbc); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | return 0; |
| 1307 | } |
| 1308 | |
Hugh Dickins | 6922c0c | 2011-08-03 16:21:25 -0700 | [diff] [blame] | 1309 | mutex_unlock(&shmem_swaplist_mutex); |
Vladimir Davydov | 37e8435 | 2016-01-20 15:02:56 -0800 | [diff] [blame] | 1310 | free_swap: |
Johannes Weiner | 0a31bc9 | 2014-08-08 14:19:22 -0700 | [diff] [blame] | 1311 | swapcache_free(swap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1312 | redirty: |
| 1313 | set_page_dirty(page); |
Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1314 | if (wbc->for_reclaim) |
| 1315 | return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ |
| 1316 | unlock_page(page); |
| 1317 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | } |
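| | /* |
| |  * On the success path above, ownership of the locked page passed to |
| |  * swap_writepage(), which unlocks it; AOP_WRITEPAGE_ACTIVATE returns |
| |  * the page still locked, asking reclaim to re-activate rather than |
| |  * retry it. |
| |  */ |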
| 1319 | |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1320 | #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1321 | static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1322 | { |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1323 | char buffer[64]; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1324 | |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1325 | if (!mpol || mpol->mode == MPOL_DEFAULT) |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1326 | return; /* show nothing */ |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1327 | |
Hugh Dickins | a7a88b2 | 2013-01-02 02:04:23 -0800 | [diff] [blame] | 1328 | mpol_to_str(buffer, sizeof(buffer), mpol); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1329 | |
Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1330 | seq_printf(seq, ",mpol=%s", buffer); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1331 | } |
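| | /* |
| |  * Illustrative sketch, not part of this file: the ",mpol=" string shown |
| |  * above mirrors the mount option parsed at mount time.  The option |
| |  * syntax follows Documentation/filesystems/tmpfs.txt; the mount point |
| |  * is an arbitrary example. |
| |  */ |
| | #if 0 /* userspace example, not kernel code */ |
| | #include <sys/mount.h> |
| | #include <assert.h> |
| |  |
| | int main(void) |
| | { |
| | 	/* interleave this tmpfs across NUMA nodes 0-3 (needs CAP_SYS_ADMIN) */ |
| | 	assert(mount("tmpfs", "/mnt/tmp", "tmpfs", 0, |
| | 		     "mpol=interleave:0-3") == 0); |
| | 	return 0; |
| | } |
| | #endif |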
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1332 | |
| 1333 | static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
| 1334 | { |
| 1335 | struct mempolicy *mpol = NULL; |
| 1336 | if (sbinfo->mpol) { |
| 1337 | spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ |
| 1338 | mpol = sbinfo->mpol; |
| 1339 | mpol_get(mpol); |
| 1340 | spin_unlock(&sbinfo->stat_lock); |
| 1341 | } |
| 1342 | return mpol; |
| 1343 | } |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1344 | #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ |
| 1345 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) |
| 1346 | { |
| 1347 | } |
| 1348 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) |
| 1349 | { |
| 1350 | return NULL; |
| 1351 | } |
| 1352 | #endif /* CONFIG_NUMA && CONFIG_TMPFS */ |
| 1353 | #ifndef CONFIG_NUMA |
| 1354 | #define vm_policy vm_private_data |
| 1355 | #endif |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1356 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1357 | static void shmem_pseudo_vma_init(struct vm_area_struct *vma, |
| 1358 | struct shmem_inode_info *info, pgoff_t index) |
| 1359 | { |
| 1360 | /* Create a pseudo vma that just contains the policy */ |
| 1361 | vma->vm_start = 0; |
| 1362 | /* Bias interleave by inode number to distribute better across nodes */ |
| 1363 | vma->vm_pgoff = index + info->vfs_inode.i_ino; |
| 1364 | vma->vm_ops = NULL; |
| 1365 | vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); |
| 1366 | } |
| 1367 | |
| 1368 | static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) |
| 1369 | { |
| 1370 | /* Drop reference taken by mpol_shared_policy_lookup() */ |
| 1371 | mpol_cond_put(vma->vm_policy); |
| 1372 | } |
| 1373 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1374 | static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, |
| 1375 | struct shmem_inode_info *info, pgoff_t index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | struct vm_area_struct pvma; |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1378 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1380 | shmem_pseudo_vma_init(&pvma, info, index); |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1381 | page = swapin_readahead(swap, gfp, &pvma, 0); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1382 | shmem_pseudo_vma_destroy(&pvma); |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1383 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1384 | return page; |
| 1385 | } |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1386 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1387 | static struct page *shmem_alloc_hugepage(gfp_t gfp, |
| 1388 | struct shmem_inode_info *info, pgoff_t index) |
| 1389 | { |
| 1390 | struct vm_area_struct pvma; |
| 1391 | struct inode *inode = &info->vfs_inode; |
| 1392 | struct address_space *mapping = inode->i_mapping; |
Geert Uytterhoeven | 4620a06 | 2016-08-03 19:58:19 +0200 | [diff] [blame] | 1393 | pgoff_t idx, hindex; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1394 | void __rcu **results; |
| 1395 | struct page *page; |
| 1396 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 1397 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1398 | return NULL; |
| 1399 | |
Geert Uytterhoeven | 4620a06 | 2016-08-03 19:58:19 +0200 | [diff] [blame] | 1400 | hindex = round_down(index, HPAGE_PMD_NR); |
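| | 	/* give up if any page already occupies a slot in the PMD-sized range */ |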
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1401 | rcu_read_lock(); |
| 1402 | if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, |
| 1403 | hindex, 1) && idx < hindex + HPAGE_PMD_NR) { |
| 1404 | rcu_read_unlock(); |
| 1405 | return NULL; |
| 1406 | } |
| 1407 | rcu_read_unlock(); |
| 1408 | |
| 1409 | shmem_pseudo_vma_init(&pvma, info, hindex); |
| 1410 | page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, |
| 1411 | HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); |
| 1412 | shmem_pseudo_vma_destroy(&pvma); |
| 1413 | if (page) |
| 1414 | prep_transhuge_page(page); |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1415 | return page; |
| 1416 | } |
| 1417 | |
| 1418 | static struct page *shmem_alloc_page(gfp_t gfp, |
| 1419 | struct shmem_inode_info *info, pgoff_t index) |
| 1420 | { |
| 1421 | struct vm_area_struct pvma; |
| 1422 | struct page *page; |
| 1423 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1424 | shmem_pseudo_vma_init(&pvma, info, index); |
| 1425 | page = alloc_page_vma(gfp, &pvma, 0); |
| 1426 | shmem_pseudo_vma_destroy(&pvma); |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1427 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1428 | return page; |
| 1429 | } |
| 1430 | |
| 1431 | static struct page *shmem_alloc_and_acct_page(gfp_t gfp, |
| 1432 | struct shmem_inode_info *info, struct shmem_sb_info *sbinfo, |
| 1433 | pgoff_t index, bool huge) |
| 1434 | { |
| 1435 | struct page *page; |
| 1436 | int nr; |
| 1437 | int err = -ENOSPC; |
| 1438 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 1439 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1440 | huge = false; |
| 1441 | nr = huge ? HPAGE_PMD_NR : 1; |
| 1442 | |
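| | 	/* |
| | 	 * Charge the size accounting first, then the per-superblock block |
| | 	 * quota (when max_blocks is set); both are rolled back at the |
| | 	 * unacct/failed labels below if allocation fails. |
| | 	 */ |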
| 1443 | if (shmem_acct_block(info->flags, nr)) |
| 1444 | goto failed; |
| 1445 | if (sbinfo->max_blocks) { |
| 1446 | if (percpu_counter_compare(&sbinfo->used_blocks, |
| 1447 | sbinfo->max_blocks - nr) > 0) |
| 1448 | goto unacct; |
| 1449 | percpu_counter_add(&sbinfo->used_blocks, nr); |
| 1450 | } |
| 1451 | |
| 1452 | if (huge) |
| 1453 | page = shmem_alloc_hugepage(gfp, info, index); |
| 1454 | else |
| 1455 | page = shmem_alloc_page(gfp, info, index); |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1456 | if (page) { |
| 1457 | __SetPageLocked(page); |
| 1458 | __SetPageSwapBacked(page); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1459 | return page; |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1460 | } |
Mel Gorman | 18a2f37 | 2012-12-05 14:01:41 -0800 | [diff] [blame] | 1461 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1462 | err = -ENOMEM; |
| 1463 | if (sbinfo->max_blocks) |
| 1464 | percpu_counter_add(&sbinfo->used_blocks, -nr); |
| 1465 | unacct: |
| 1466 | shmem_unacct_blocks(info->flags, nr); |
| 1467 | failed: |
| 1468 | return ERR_PTR(err); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1469 | } |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1470 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1471 | /* |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1472 | * When a page is moved from swapcache to shmem filecache (either by the |
| 1473 | * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of |
| 1474 | * shmem_unuse_inode()), it may have been read in earlier from swap, in |
| 1475 | * ignorance of the mapping it belongs to. If that mapping has special |
| 1476 | * constraints (like the gma500 GEM driver, which requires RAM below 4GB), |
| 1477 | * we may need to copy to a suitable page before moving to filecache. |
| 1478 | * |
| 1479 | * In a future release, this may well be extended to respect cpuset and |
| 1480 | * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); |
| 1481 | * but for now it is a simple matter of zone. |
| 1482 | */ |
| 1483 | static bool shmem_should_replace_page(struct page *page, gfp_t gfp) |
| 1484 | { |
| 1485 | return page_zonenum(page) > gfp_zone(gfp); |
| 1486 | } |
| 1487 | |
| 1488 | static int shmem_replace_page(struct page **pagep, gfp_t gfp, |
| 1489 | struct shmem_inode_info *info, pgoff_t index) |
| 1490 | { |
| 1491 | struct page *oldpage, *newpage; |
| 1492 | struct address_space *swap_mapping; |
| 1493 | pgoff_t swap_index; |
| 1494 | int error; |
| 1495 | |
| 1496 | oldpage = *pagep; |
| 1497 | swap_index = page_private(oldpage); |
| 1498 | swap_mapping = page_mapping(oldpage); |
| 1499 | |
| 1500 | /* |
| 1501 | * We have arrived here because our zones are constrained, so don't |
| 1502 | * limit chance of success by further cpuset and node constraints. |
| 1503 | */ |
| 1504 | gfp &= ~GFP_CONSTRAINT_MASK; |
| 1505 | newpage = shmem_alloc_page(gfp, info, index); |
| 1506 | if (!newpage) |
| 1507 | return -ENOMEM; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1508 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1509 | get_page(newpage); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1510 | copy_highpage(newpage, oldpage); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1511 | flush_dcache_page(newpage); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1512 | |
Hugh Dickins | 9956edf | 2016-11-10 10:46:11 -0800 | [diff] [blame] | 1513 | __SetPageLocked(newpage); |
| 1514 | __SetPageSwapBacked(newpage); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1515 | SetPageUptodate(newpage); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1516 | set_page_private(newpage, swap_index); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1517 | SetPageSwapCache(newpage); |
| 1518 | |
| 1519 | /* |
| 1520 | * Our caller will very soon move newpage out of swapcache, but it's |
| 1521 | * a nice clean interface for us to replace oldpage by newpage there. |
| 1522 | */ |
| 1523 | spin_lock_irq(&swap_mapping->tree_lock); |
| 1524 | error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, |
| 1525 | newpage); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1526 | if (!error) { |
Mel Gorman | 11fb998 | 2016-07-28 15:46:20 -0700 | [diff] [blame] | 1527 | __inc_node_page_state(newpage, NR_FILE_PAGES); |
| 1528 | __dec_node_page_state(oldpage, NR_FILE_PAGES); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1529 | } |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1530 | spin_unlock_irq(&swap_mapping->tree_lock); |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1531 | |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1532 | if (unlikely(error)) { |
| 1533 | /* |
| 1534 | * Is this possible? I think not, now that our callers check |
| 1535 | * both PageSwapCache and page_private after getting page lock; |
| 1536 | * but be defensive. Reverse old to newpage for clear and free. |
| 1537 | */ |
| 1538 | oldpage = newpage; |
| 1539 | } else { |
Johannes Weiner | 6a93ca8 | 2016-03-15 14:57:19 -0700 | [diff] [blame] | 1540 | mem_cgroup_migrate(oldpage, newpage); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1541 | lru_cache_add_anon(newpage); |
| 1542 | *pagep = newpage; |
| 1543 | } |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1544 | |
| 1545 | ClearPageSwapCache(oldpage); |
| 1546 | set_page_private(oldpage, 0); |
| 1547 | |
| 1548 | unlock_page(oldpage); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1549 | put_page(oldpage); |
| 1550 | put_page(oldpage); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1551 | return error; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1552 | } |
| 1553 | |
| 1554 | /* |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1555 | * shmem_getpage_gfp - find page in cache, or get from swap, or allocate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | * |
| 1557 | * If we allocate a new one we do not mark it dirty. That's up to the |
| 1558 | * vm. If we swap it in we mark it dirty, since we also free the swap |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1559 | * entry: a page cannot live in both the swap and page cache. |
| 1560 | * |
| 1561 | * fault_mm and fault_type are only supplied by shmem_fault: |
| 1562 | * otherwise they are NULL. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1564 | static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1565 | struct page **pagep, enum sgp_type sgp, gfp_t gfp, |
| 1566 | struct mm_struct *fault_mm, int *fault_type) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1567 | { |
| 1568 | struct address_space *mapping = inode->i_mapping; |
Arnd Bergmann | 23f919d | 2016-12-12 16:42:28 -0800 | [diff] [blame] | 1569 | struct shmem_inode_info *info = SHMEM_I(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1570 | struct shmem_sb_info *sbinfo; |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1571 | struct mm_struct *charge_mm; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1572 | struct mem_cgroup *memcg; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1573 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | swp_entry_t swap; |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1575 | enum sgp_type sgp_huge = sgp; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1576 | pgoff_t hindex = index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1577 | int error; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1578 | int once = 0; |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1579 | int alloced = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1581 | if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1582 | return -EFBIG; |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1583 | if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) |
| 1584 | sgp = SGP_CACHE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 | repeat: |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1586 | swap.val = 0; |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 1587 | page = find_lock_entry(mapping, index); |
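| | 	/* |
| | 	 * find_lock_entry() returns either a locked page, or an exceptional |
| | 	 * entry encoding the swap slot this page was written out to. |
| | 	 */ |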
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1588 | if (radix_tree_exceptional_entry(page)) { |
| 1589 | swap = radix_to_swp_entry(page); |
| 1590 | page = NULL; |
| 1591 | } |
| 1592 | |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1593 | if (sgp <= SGP_CACHE && |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1594 | ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1595 | error = -EINVAL; |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1596 | goto unlock; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1597 | } |
| 1598 | |
Hugh Dickins | 66d2f4d | 2014-07-02 15:22:38 -0700 | [diff] [blame] | 1599 | if (page && sgp == SGP_WRITE) |
| 1600 | mark_page_accessed(page); |
| 1601 | |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1602 | /* fallocated page? */ |
| 1603 | if (page && !PageUptodate(page)) { |
| 1604 | if (sgp != SGP_READ) |
| 1605 | goto clear; |
| 1606 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1607 | put_page(page); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1608 | page = NULL; |
| 1609 | } |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1610 | if (page || (sgp == SGP_READ && !swap.val)) { |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1611 | *pagep = page; |
| 1612 | return 0; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1613 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 | |
| 1615 | /* |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1616 | * Fast cache lookup did not find it: |
| 1617 | * bring it back from swap or allocate. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | */ |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1619 | sbinfo = SHMEM_SB(inode->i_sb); |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1620 | charge_mm = fault_mm ? : current->mm; |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1621 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1622 | if (swap.val) { |
| 1623 | /* Look it up and read it in.. */ |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1624 | page = lookup_swap_cache(swap); |
| 1625 | if (!page) { |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1626 | /* Or update major stats only when swapin succeeds?? */ |
| 1627 | if (fault_type) { |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1628 | *fault_type |= VM_FAULT_MAJOR; |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1629 | count_vm_event(PGMAJFAULT); |
| 1630 | mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT); |
| 1631 | } |
| 1632 | /* Here we actually start the I/O */ |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 1633 | page = shmem_swapin(swap, gfp, info, index); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1634 | if (!page) { |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1635 | error = -ENOMEM; |
| 1636 | goto failed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1637 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | } |
| 1639 | |
| 1640 | /* We have to do this with page locked to prevent races */ |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1641 | lock_page(page); |
Hugh Dickins | 0142ef6 | 2012-06-07 14:21:09 -0700 | [diff] [blame] | 1642 | if (!PageSwapCache(page) || page_private(page) != swap.val || |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1643 | !shmem_confirm_swap(mapping, index, swap)) { |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1644 | error = -EEXIST; /* try again */ |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1645 | goto unlock; |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1646 | } |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1647 | if (!PageUptodate(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | error = -EIO; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1649 | goto failed; |
| 1650 | } |
| 1651 | wait_on_page_writeback(page); |
| 1652 | |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1653 | if (shmem_should_replace_page(page, gfp)) { |
| 1654 | error = shmem_replace_page(&page, gfp, info, index); |
| 1655 | if (error) |
| 1656 | goto failed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | } |
| 1658 | |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1659 | error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1660 | false); |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1661 | if (!error) { |
Hugh Dickins | aa3b189 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1662 | error = shmem_add_to_page_cache(page, mapping, index, |
Wang Sheng-Hui | fed400a | 2014-08-06 16:07:26 -0700 | [diff] [blame] | 1663 | swp_to_radix_entry(swap)); |
Hugh Dickins | 215c02b | 2012-11-16 14:15:03 -0800 | [diff] [blame] | 1664 | /* |
| 1665 | * We already confirmed swap under page lock, and make |
| 1666 | * no memory allocation here, so usually no possibility |
| 1667 | * of error; but free_swap_and_cache() only trylocks a |
| 1668 | * page, so it is just possible that the entry has been |
| 1669 | * truncated or holepunched since swap was confirmed. |
| 1670 | * shmem_undo_range() will have done some of the |
| 1671 | * unaccounting, now delete_from_swap_cache() will do |
Vladimir Davydov | 93aa7d9 | 2015-02-11 15:24:59 -0800 | [diff] [blame] | 1672 | * the rest. |
Hugh Dickins | 215c02b | 2012-11-16 14:15:03 -0800 | [diff] [blame] | 1673 | * Reset swap.val? No, leave it so "failed" goes back to |
| 1674 | * "repeat": reading a hole and writing should succeed. |
| 1675 | */ |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1676 | if (error) { |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1677 | mem_cgroup_cancel_charge(page, memcg, false); |
Hugh Dickins | 215c02b | 2012-11-16 14:15:03 -0800 | [diff] [blame] | 1678 | delete_from_swap_cache(page); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1679 | } |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1680 | } |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1681 | if (error) |
| 1682 | goto failed; |
| 1683 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1684 | mem_cgroup_commit_charge(page, memcg, true, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1685 | |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1686 | spin_lock_irq(&info->lock); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1687 | info->swapped--; |
| 1688 | shmem_recalc_inode(inode); |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1689 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1690 | |
Hugh Dickins | 66d2f4d | 2014-07-02 15:22:38 -0700 | [diff] [blame] | 1691 | if (sgp == SGP_WRITE) |
| 1692 | mark_page_accessed(page); |
| 1693 | |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1694 | delete_from_swap_cache(page); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1695 | set_page_dirty(page); |
| 1696 | swap_free(swap); |
| 1697 | |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1698 | } else { |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1699 | /* shmem_symlink() case: do not bother with huge pages */ |
| 1700 | if (mapping->a_ops != &shmem_aops) |
| 1701 | goto alloc_nohuge; |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1702 | if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1703 | goto alloc_nohuge; |
| 1704 | if (shmem_huge == SHMEM_HUGE_FORCE) |
| 1705 | goto alloc_huge; |
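| | 		/* neither forced on nor denied globally: per-mount huge= policy decides */ |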
| 1706 | switch (sbinfo->huge) { |
| 1707 | loff_t i_size; |
| 1708 | pgoff_t off; |
| 1709 | case SHMEM_HUGE_NEVER: |
| 1710 | goto alloc_nohuge; |
| 1711 | case SHMEM_HUGE_WITHIN_SIZE: |
| 1712 | off = round_up(index, HPAGE_PMD_NR); |
| 1713 | i_size = round_up(i_size_read(inode), PAGE_SIZE); |
| 1714 | if (i_size >= HPAGE_PMD_SIZE && |
| 1715 | i_size >> PAGE_SHIFT >= off) |
| 1716 | goto alloc_huge; |
| 1717 | /* fallthrough */ |
| 1718 | case SHMEM_HUGE_ADVISE: |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1719 | if (sgp_huge == SGP_HUGE) |
| 1720 | goto alloc_huge; |
| 1721 | /* TODO: implement fadvise() hints */ |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1722 | goto alloc_nohuge; |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1723 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1725 | alloc_huge: |
| 1726 | page = shmem_alloc_and_acct_page(gfp, info, sbinfo, |
| 1727 | index, true); |
| 1728 | if (IS_ERR(page)) { |
| 1729 | alloc_nohuge: |
| 1730 | 			page = shmem_alloc_and_acct_page(gfp, info, sbinfo, index, false); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1731 | } |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1732 | if (IS_ERR(page)) { |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1733 | int retry = 5; |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1734 | error = PTR_ERR(page); |
| 1735 | page = NULL; |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1736 | if (error != -ENOSPC) |
| 1737 | goto failed; |
| 1738 | /* |
| 1739 | * Try to reclaim some space by splitting a huge page |
| 1740 | * beyond i_size on the filesystem. |
| 1741 | */ |
| 1742 | while (retry--) { |
| 1743 | int ret; |
| 1744 | ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); |
| 1745 | if (ret == SHRINK_STOP) |
| 1746 | break; |
| 1747 | if (ret) |
| 1748 | goto alloc_nohuge; |
| 1749 | } |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1750 | goto failed; |
| 1751 | } |
| 1752 | |
| 1753 | if (PageTransHuge(page)) |
| 1754 | hindex = round_down(index, HPAGE_PMD_NR); |
| 1755 | else |
| 1756 | hindex = index; |
| 1757 | |
Hugh Dickins | 66d2f4d | 2014-07-02 15:22:38 -0700 | [diff] [blame] | 1758 | if (sgp == SGP_WRITE) |
Hugh Dickins | eb39d61 | 2014-08-06 16:06:43 -0700 | [diff] [blame] | 1759 | __SetPageReferenced(page); |
Hugh Dickins | 66d2f4d | 2014-07-02 15:22:38 -0700 | [diff] [blame] | 1760 | |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1761 | error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1762 | PageTransHuge(page)); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1763 | if (error) |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1764 | goto unacct; |
| 1765 | error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, |
| 1766 | compound_order(page)); |
Hugh Dickins | b065b43 | 2012-07-11 14:02:48 -0700 | [diff] [blame] | 1767 | if (!error) { |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1768 | error = shmem_add_to_page_cache(page, mapping, hindex, |
Wang Sheng-Hui | fed400a | 2014-08-06 16:07:26 -0700 | [diff] [blame] | 1769 | NULL); |
Hugh Dickins | b065b43 | 2012-07-11 14:02:48 -0700 | [diff] [blame] | 1770 | radix_tree_preload_end(); |
| 1771 | } |
| 1772 | if (error) { |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1773 | mem_cgroup_cancel_charge(page, memcg, |
| 1774 | PageTransHuge(page)); |
| 1775 | goto unacct; |
Hugh Dickins | b065b43 | 2012-07-11 14:02:48 -0700 | [diff] [blame] | 1776 | } |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1777 | mem_cgroup_commit_charge(page, memcg, false, |
| 1778 | PageTransHuge(page)); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1779 | lru_cache_add_anon(page); |
| 1780 | |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1781 | spin_lock_irq(&info->lock); |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1782 | info->alloced += 1 << compound_order(page); |
| 1783 | inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1784 | shmem_recalc_inode(inode); |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1785 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1786 | alloced = true; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1787 | |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 1788 | if (PageTransHuge(page) && |
| 1789 | DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < |
| 1790 | hindex + HPAGE_PMD_NR - 1) { |
| 1791 | /* |
| 1792 | * Part of the huge page is beyond i_size: subject |
| 1793 | * to shrink under memory pressure. |
| 1794 | */ |
| 1795 | spin_lock(&sbinfo->shrinklist_lock); |
| 1796 | if (list_empty(&info->shrinklist)) { |
| 1797 | list_add_tail(&info->shrinklist, |
| 1798 | &sbinfo->shrinklist); |
| 1799 | sbinfo->shrinklist_len++; |
| 1800 | } |
| 1801 | spin_unlock(&sbinfo->shrinklist_lock); |
| 1802 | } |
| 1803 | |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 1804 | /* |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 1805 | * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. |
| 1806 | */ |
| 1807 | if (sgp == SGP_FALLOC) |
| 1808 | sgp = SGP_WRITE; |
| 1809 | clear: |
| 1810 | /* |
| 1811 | * Let SGP_WRITE caller clear ends if write does not fill page; |
| 1812 | * but SGP_FALLOC on a page fallocated earlier must initialize |
| 1813 | * it now, lest undo on failure cancel our earlier guarantee. |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 1814 | */ |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1815 | if (sgp != SGP_WRITE && !PageUptodate(page)) { |
| 1816 | struct page *head = compound_head(page); |
| 1817 | int i; |
| 1818 | |
| 1819 | for (i = 0; i < (1 << compound_order(head)); i++) { |
| 1820 | clear_highpage(head + i); |
| 1821 | flush_dcache_page(head + i); |
| 1822 | } |
| 1823 | SetPageUptodate(head); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 1824 | } |
Hugh Dickins | 59a16ea | 2011-05-11 15:13:38 -0700 | [diff] [blame] | 1825 | } |
Hugh Dickins | bde05d1 | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 1826 | |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1827 | /* Perhaps the file has been truncated since we checked */ |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 1828 | if (sgp <= SGP_CACHE && |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1829 | ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1830 | if (alloced) { |
| 1831 | ClearPageDirty(page); |
| 1832 | delete_from_page_cache(page); |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1833 | spin_lock_irq(&info->lock); |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1834 | shmem_recalc_inode(inode); |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1835 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1836 | } |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1837 | error = -EINVAL; |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1838 | goto unlock; |
Shaohua Li | ff36b801 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1839 | } |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1840 | *pagep = page + index - hindex; |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1841 | return 0; |
Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1842 | |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1843 | /* |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1844 | * Error recovery. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | */ |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1846 | unacct: |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 1847 | if (sbinfo->max_blocks) |
| 1848 | percpu_counter_sub(&sbinfo->used_blocks, |
| 1849 | 1 << compound_order(page)); |
| 1850 | shmem_unacct_blocks(info->flags, 1 << compound_order(page)); |
| 1851 | |
| 1852 | if (PageTransHuge(page)) { |
| 1853 | unlock_page(page); |
| 1854 | put_page(page); |
| 1855 | goto alloc_nohuge; |
| 1856 | } |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1857 | failed: |
Hugh Dickins | 267a4c7 | 2015-12-11 13:40:55 -0800 | [diff] [blame] | 1858 | if (swap.val && !shmem_confirm_swap(mapping, index, swap)) |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1859 | error = -EEXIST; |
| 1860 | unlock: |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1861 | if (page) { |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1862 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 1863 | put_page(page); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1864 | } |
| 1865 | if (error == -ENOSPC && !once++) { |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1866 | spin_lock_irq(&info->lock); |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1867 | shmem_recalc_inode(inode); |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 1868 | spin_unlock_irq(&info->lock); |
Hugh Dickins | 27ab700 | 2011-07-25 17:12:36 -0700 | [diff] [blame] | 1869 | goto repeat; |
Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1870 | } |
Hugh Dickins | d189922 | 2012-07-11 14:02:47 -0700 | [diff] [blame] | 1871 | if (error == -EEXIST) /* from above or from radix_tree_insert */ |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 1872 | goto repeat; |
| 1873 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1874 | } |
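| | |
| | /* |
| | * Usage sketch (illustrative, not part of this file): shmem_getpage() |
| | * returns with *pagep locked on success (or NULL for a hole under |
| | * SGP_READ), so a hypothetical caller must drop both the page lock |
| | * and the reference when done: |
| | * |
| | *	struct page *page; |
| | *	int err = shmem_getpage(inode, index, &page, SGP_CACHE); |
| | *	if (err) |
| | *		return err; |
| | *	... use page contents ... |
| | *	unlock_page(page); |
| | *	put_page(page); |
| | */ |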
| 1875 | |
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 1876 | /* |
| 1877 | * This is like autoremove_wake_function, but it removes the wait queue |
| 1878 | * entry unconditionally - even if something else had already woken the |
| 1879 | * target. |
| 1880 | */ |
| 1881 | static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) |
| 1882 | { |
| 1883 | int ret = default_wake_function(wait, mode, sync, key); |
| 1884 | list_del_init(&wait->task_list); |
| 1885 | return ret; |
| 1886 | } |
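| | |
| | /* |
| | * Pattern sketch (mirrors the use in shmem_fault() below): pairing this |
| | * function with DEFINE_WAIT_FUNC guarantees the entry is off the queue |
| | * once woken, even when wake_up_all() races with another wakeup: |
| | * |
| | *	DEFINE_WAIT_FUNC(wait, synchronous_wake_function); |
| | *	prepare_to_wait(&waitq, &wait, TASK_UNINTERRUPTIBLE); |
| | *	schedule(); |
| | *	finish_wait(&waitq, &wait); |
| | */ |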
| 1887 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1888 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 1889 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 1890 | struct inode *inode = file_inode(vma->vm_file); |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1891 | gfp_t gfp = mapping_gfp_mask(inode->i_mapping); |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1892 | enum sgp_type sgp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | int error; |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1894 | int ret = VM_FAULT_LOCKED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1895 | |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1896 | /* |
| 1897 | * Trinity finds that probing a hole which tmpfs is punching can |
| 1898 | * prevent the hole-punch from ever completing: which in turn |
| 1899 | * locks writers out with its hold on i_mutex. So refrain from |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1900 | * faulting pages into the hole while it's being punched. Although |
| 1901 | * shmem_undo_range() does remove the additions, it may be unable to |
| 1902 | * keep up, as each new page needs its own unmap_mapping_range() call, |
| 1903 | * and the i_mmap tree grows ever slower to scan if new vmas are added. |
| 1904 | * |
| 1905 | * It does not matter if we sometimes reach this check just before the |
| 1906 | * hole-punch begins, so that one fault then races with the punch: |
| 1907 | * we just need to make racing faults a rare case. |
| 1908 | * |
| 1909 | * The implementation below would be much simpler if we just used a |
| 1910 | * standard mutex or completion: but we cannot take i_mutex in fault, |
| 1911 | * and bloating every shmem inode for this unlikely case would be sad. |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1912 | */ |
| 1913 | if (unlikely(inode->i_private)) { |
| 1914 | struct shmem_falloc *shmem_falloc; |
| 1915 | |
| 1916 | spin_lock(&inode->i_lock); |
| 1917 | shmem_falloc = inode->i_private; |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1918 | if (shmem_falloc && |
| 1919 | shmem_falloc->waitq && |
| 1920 | vmf->pgoff >= shmem_falloc->start && |
| 1921 | vmf->pgoff < shmem_falloc->next) { |
| 1922 | wait_queue_head_t *shmem_falloc_waitq; |
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 1923 | DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1924 | |
| 1925 | ret = VM_FAULT_NOPAGE; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1926 | if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && |
| 1927 | !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1928 | /* It's polite to up mmap_sem if we can */ |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1929 | up_read(&vma->vm_mm->mmap_sem); |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1930 | ret = VM_FAULT_RETRY; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1931 | } |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1932 | |
| 1933 | shmem_falloc_waitq = shmem_falloc->waitq; |
| 1934 | prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, |
| 1935 | TASK_UNINTERRUPTIBLE); |
| 1936 | spin_unlock(&inode->i_lock); |
| 1937 | schedule(); |
| 1938 | |
| 1939 | /* |
| 1940 | * shmem_falloc_waitq points into the shmem_fallocate() |
| 1941 | * stack of the hole-punching task: shmem_falloc_waitq |
| 1942 | * is usually invalid by the time we reach here, but |
| 1943 | * finish_wait() does not dereference it in that case; |
| 1944 | * though i_lock needed lest racing with wake_up_all(). |
| 1945 | */ |
| 1946 | spin_lock(&inode->i_lock); |
| 1947 | finish_wait(shmem_falloc_waitq, &shmem_fault_wait); |
| 1948 | spin_unlock(&inode->i_lock); |
| 1949 | return ret; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1950 | } |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 1951 | spin_unlock(&inode->i_lock); |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 1952 | } |
| 1953 | |
Kirill A. Shutemov | 657e303 | 2016-07-26 15:26:21 -0700 | [diff] [blame] | 1954 | sgp = SGP_CACHE; |
| 1955 | if (vma->vm_flags & VM_HUGEPAGE) |
| 1956 | sgp = SGP_HUGE; |
| 1957 | else if (vma->vm_flags & VM_NOHUGEPAGE) |
| 1958 | sgp = SGP_NOHUGE; |
| 1959 | |
| 1960 | error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 1961 | gfp, vma->vm_mm, &ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | if (error) |
| 1963 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 1964 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1965 | } |
| 1966 | |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 1967 | unsigned long shmem_get_unmapped_area(struct file *file, |
| 1968 | unsigned long uaddr, unsigned long len, |
| 1969 | unsigned long pgoff, unsigned long flags) |
| 1970 | { |
| 1971 | unsigned long (*get_area)(struct file *, |
| 1972 | unsigned long, unsigned long, unsigned long, unsigned long); |
| 1973 | unsigned long addr; |
| 1974 | unsigned long offset; |
| 1975 | unsigned long inflated_len; |
| 1976 | unsigned long inflated_addr; |
| 1977 | unsigned long inflated_offset; |
| 1978 | |
| 1979 | if (len > TASK_SIZE) |
| 1980 | return -ENOMEM; |
| 1981 | |
| 1982 | get_area = current->mm->get_unmapped_area; |
| 1983 | addr = get_area(file, uaddr, len, pgoff, flags); |
| 1984 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 1985 | if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 1986 | return addr; |
| 1987 | if (IS_ERR_VALUE(addr)) |
| 1988 | return addr; |
| 1989 | if (addr & ~PAGE_MASK) |
| 1990 | return addr; |
| 1991 | if (addr > TASK_SIZE - len) |
| 1992 | return addr; |
| 1993 | |
| 1994 | if (shmem_huge == SHMEM_HUGE_DENY) |
| 1995 | return addr; |
| 1996 | if (len < HPAGE_PMD_SIZE) |
| 1997 | return addr; |
| 1998 | if (flags & MAP_FIXED) |
| 1999 | return addr; |
| 2000 | /* |
| 2001 | * Our priority is to support MAP_SHARED mapped hugely; |
| 2002 | * and support MAP_PRIVATE mapped hugely too, until it is COWed. |
| 2003 | * But if caller specified an address hint, respect that as before. |
| 2004 | */ |
| 2005 | if (uaddr) |
| 2006 | return addr; |
| 2007 | |
| 2008 | if (shmem_huge != SHMEM_HUGE_FORCE) { |
| 2009 | struct super_block *sb; |
| 2010 | |
| 2011 | if (file) { |
| 2012 | VM_BUG_ON(file->f_op != &shmem_file_operations); |
| 2013 | sb = file_inode(file)->i_sb; |
| 2014 | } else { |
| 2015 | /* |
| 2016 | * Called directly from mm/mmap.c, or drivers/char/mem.c |
| 2017 | * for "/dev/zero", to create a shared anonymous object. |
| 2018 | */ |
| 2019 | if (IS_ERR(shm_mnt)) |
| 2020 | return addr; |
| 2021 | sb = shm_mnt->mnt_sb; |
| 2022 | } |
Toshi Kani | 3089bf6 | 2016-09-23 20:21:56 -0700 | [diff] [blame] | 2023 | if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 2024 | return addr; |
| 2025 | } |
| 2026 | |
| 2027 | offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); |
| 2028 | if (offset && offset + len < 2 * HPAGE_PMD_SIZE) |
| 2029 | return addr; |
| 2030 | if ((addr & (HPAGE_PMD_SIZE-1)) == offset) |
| 2031 | return addr; |
| 2032 | |
| 2033 | inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; |
| 2034 | if (inflated_len > TASK_SIZE) |
| 2035 | return addr; |
| 2036 | if (inflated_len < len) |
| 2037 | return addr; |
| 2038 | |
| 2039 | inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); |
| 2040 | if (IS_ERR_VALUE(inflated_addr)) |
| 2041 | return addr; |
| 2042 | if (inflated_addr & ~PAGE_MASK) |
| 2043 | return addr; |
| 2044 | |
| 2045 | inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); |
| 2046 | inflated_addr += offset - inflated_offset; |
| 2047 | if (inflated_offset > offset) |
| 2048 | inflated_addr += HPAGE_PMD_SIZE; |
| 2049 | |
| 2050 | if (inflated_addr > TASK_SIZE - len) |
| 2051 | return addr; |
| 2052 | return inflated_addr; |
| 2053 | } |
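| | |
| | /* |
| | * Worked example (illustrative, assuming 4KiB pages and 2MiB |
| | * HPAGE_PMD_SIZE): for pgoff 0 (so offset 0) and len 4MiB, the search |
| | * is inflated to 4MiB + 2MiB - 4KiB.  If the unhinted search returns |
| | * inflated_addr = 0x7f1234567000, then inflated_offset = 0x167000; |
| | * adding offset - inflated_offset gives the 2MiB-aligned |
| | * 0x7f1234400000, and since inflated_offset > offset one more |
| | * HPAGE_PMD_SIZE lands the result at 0x7f1234600000, which is both |
| | * huge-aligned and still inside the inflated area just reserved. |
| | */ |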
| 2054 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2055 | #ifdef CONFIG_NUMA |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2056 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2057 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2058 | struct inode *inode = file_inode(vma->vm_file); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2059 | return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2060 | } |
| 2061 | |
Adrian Bunk | d8dc74f | 2007-10-16 01:26:26 -0700 | [diff] [blame] | 2062 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, |
| 2063 | unsigned long addr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2064 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2065 | struct inode *inode = file_inode(vma->vm_file); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2066 | pgoff_t index; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2067 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2068 | index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
| 2069 | return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2070 | } |
| 2071 | #endif |
| 2072 | |
| 2073 | int shmem_lock(struct file *file, int lock, struct user_struct *user) |
| 2074 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2075 | struct inode *inode = file_inode(file); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2076 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 2077 | int retval = -ENOMEM; |
| 2078 | |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 2079 | spin_lock_irq(&info->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | if (lock && !(info->flags & VM_LOCKED)) { |
| 2081 | if (!user_shm_lock(inode->i_size, user)) |
| 2082 | goto out_nomem; |
| 2083 | info->flags |= VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2084 | mapping_set_unevictable(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2085 | } |
| 2086 | if (!lock && (info->flags & VM_LOCKED) && user) { |
| 2087 | user_shm_unlock(inode->i_size, user); |
| 2088 | info->flags &= ~VM_LOCKED; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2089 | mapping_clear_unevictable(file->f_mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | } |
| 2091 | retval = 0; |
Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 2092 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | out_nomem: |
Kirill A. Shutemov | 4595ef8 | 2016-07-26 15:26:29 -0700 | [diff] [blame] | 2094 | spin_unlock_irq(&info->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2095 | return retval; |
| 2096 | } |
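| | |
| | /* |
| | * Userspace sketch (illustrative; this path is reached through the |
| | * SysV ipc layer rather than called directly): |
| | * |
| | *	#include <sys/ipc.h> |
| | *	#include <sys/shm.h> |
| | * |
| | *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600); |
| | *	if (id >= 0) |
| | *		shmctl(id, SHM_LOCK, NULL);	// pin segment: no swap |
| | */ |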
| 2097 | |
Adrian Bunk | 9b83a6a | 2007-02-28 20:11:03 -0800 | [diff] [blame] | 2098 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | { |
| 2100 | file_accessed(file); |
| 2101 | vma->vm_ops = &shmem_vm_ops; |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 2102 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 2103 | ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < |
| 2104 | (vma->vm_end & HPAGE_PMD_MASK)) { |
| 2105 | khugepaged_enter(vma, vma->vm_flags); |
| 2106 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2107 | return 0; |
| 2108 | } |
| 2109 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2110 | static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, |
Al Viro | 09208d1 | 2011-07-26 03:15:03 -0400 | [diff] [blame] | 2111 | umode_t mode, dev_t dev, unsigned long flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2112 | { |
| 2113 | struct inode *inode; |
| 2114 | struct shmem_inode_info *info; |
| 2115 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
| 2116 | |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2117 | if (shmem_reserve_inode(sb)) |
| 2118 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2119 | |
| 2120 | inode = new_inode(sb); |
| 2121 | if (inode) { |
Christoph Hellwig | 85fe402 | 2010-10-23 11:19:54 -0400 | [diff] [blame] | 2122 | inode->i_ino = get_next_ino(); |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2123 | inode_init_owner(inode, dir, mode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 | inode->i_blocks = 0; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2125 | inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2126 | inode->i_generation = get_seconds(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2127 | info = SHMEM_I(inode); |
| 2128 | memset(info, 0, (char *)inode - (char *)info); |
| 2129 | spin_lock_init(&info->lock); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2130 | info->seals = F_SEAL_SEAL; |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2131 | info->flags = flags & VM_NORESERVE; |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 2132 | INIT_LIST_HEAD(&info->shrinklist); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2133 | INIT_LIST_HEAD(&info->swaplist); |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 2134 | simple_xattrs_init(&info->xattrs); |
Al Viro | 72c0490 | 2009-06-24 16:58:48 -0400 | [diff] [blame] | 2135 | cache_no_acl(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2136 | |
| 2137 | switch (mode & S_IFMT) { |
| 2138 | default: |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2139 | inode->i_op = &shmem_special_inode_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2140 | init_special_inode(inode, mode, dev); |
| 2141 | break; |
| 2142 | case S_IFREG: |
Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 2143 | inode->i_mapping->a_ops = &shmem_aops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2144 | inode->i_op = &shmem_inode_operations; |
| 2145 | inode->i_fop = &shmem_file_operations; |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2146 | mpol_shared_policy_init(&info->policy, |
| 2147 | shmem_get_sbmpol(sbinfo)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2148 | break; |
| 2149 | case S_IFDIR: |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 2150 | inc_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2151 | /* Some things misbehave if size == 0 on a directory */ |
| 2152 | inode->i_size = 2 * BOGO_DIRENT_SIZE; |
| 2153 | inode->i_op = &shmem_dir_inode_operations; |
| 2154 | inode->i_fop = &simple_dir_operations; |
| 2155 | break; |
| 2156 | case S_IFLNK: |
| 2157 | /* |
| 2158 | * Must not load anything in the rbtree, |
| 2159 | * mpol_free_shared_policy will not be called. |
| 2160 | */ |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2161 | mpol_shared_policy_init(&info->policy, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2162 | break; |
| 2163 | } |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2164 | } else |
| 2165 | shmem_free_inode(sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2166 | return inode; |
| 2167 | } |
| 2168 | |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 2169 | bool shmem_mapping(struct address_space *mapping) |
| 2170 | { |
Sasha Levin | f0774d8 | 2015-02-23 05:38:00 -0500 | [diff] [blame] | 2171 | if (!mapping->host) |
| 2172 | return false; |
| 2173 | |
Christoph Hellwig | 97b713b | 2015-01-14 10:42:31 +0100 | [diff] [blame] | 2174 | return mapping->host->i_sb->s_op == &shmem_ops; |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 2175 | } |
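| | |
| | /* |
| | * Usage sketch (illustrative): mm code can use this test to decide |
| | * whether a mapping's radix tree may hold swap entries stored as |
| | * exceptional entries, e.g.: |
| | * |
| | *	if (shmem_mapping(mapping)) |
| | *		... expect radix_tree_exceptional_entry() slots ... |
| | */ |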
| 2176 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2177 | #ifdef CONFIG_TMPFS |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2178 | static const struct inode_operations shmem_symlink_inode_operations; |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 2179 | static const struct inode_operations shmem_short_symlink_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2180 | |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 2181 | #ifdef CONFIG_TMPFS_XATTR |
| 2182 | static int shmem_initxattrs(struct inode *, const struct xattr *, void *); |
| 2183 | #else |
| 2184 | #define shmem_initxattrs NULL |
| 2185 | #endif |
| 2186 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2187 | static int |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2188 | shmem_write_begin(struct file *file, struct address_space *mapping, |
| 2189 | loff_t pos, unsigned len, unsigned flags, |
| 2190 | struct page **pagep, void **fsdata) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2191 | { |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2192 | struct inode *inode = mapping->host; |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2193 | struct shmem_inode_info *info = SHMEM_I(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2194 | pgoff_t index = pos >> PAGE_SHIFT; |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2195 | |
| 2196 | /* i_mutex is held by caller */ |
| 2197 | if (unlikely(info->seals)) { |
| 2198 | if (info->seals & F_SEAL_WRITE) |
| 2199 | return -EPERM; |
| 2200 | if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) |
| 2201 | return -EPERM; |
| 2202 | } |
| 2203 | |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 2204 | return shmem_getpage(inode, index, pagep, SGP_WRITE); |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2205 | } |
| 2206 | |
| 2207 | static int |
| 2208 | shmem_write_end(struct file *file, struct address_space *mapping, |
| 2209 | loff_t pos, unsigned len, unsigned copied, |
| 2210 | struct page *page, void *fsdata) |
| 2211 | { |
| 2212 | struct inode *inode = mapping->host; |
| 2213 | |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2214 | if (pos + copied > inode->i_size) |
| 2215 | i_size_write(inode, pos + copied); |
| 2216 | |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 2217 | if (!PageUptodate(page)) { |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 2218 | struct page *head = compound_head(page); |
| 2219 | if (PageTransCompound(page)) { |
| 2220 | int i; |
| 2221 | |
| 2222 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 2223 | if (head + i == page) |
| 2224 | continue; |
| 2225 | clear_highpage(head + i); |
| 2226 | flush_dcache_page(head + i); |
| 2227 | } |
| 2228 | } |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2229 | if (copied < PAGE_SIZE) { |
| 2230 | unsigned from = pos & (PAGE_SIZE - 1); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 2231 | zero_user_segments(page, 0, from, |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2232 | from + copied, PAGE_SIZE); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 2233 | } |
Kirill A. Shutemov | 800d8c6 | 2016-07-26 15:26:18 -0700 | [diff] [blame] | 2234 | SetPageUptodate(head); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 2235 | } |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2236 | set_page_dirty(page); |
Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 2237 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2238 | put_page(page); |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2239 | |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2240 | return copied; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2241 | } |
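| | |
| | /* |
| | * Worked example (illustrative, 4KiB pages): a write of copied = 50 |
| | * bytes at pos = 4196 gives from = 100, so zero_user_segments() above |
| | * clears bytes [0, 100) and [150, 4096) of the page before |
| | * SetPageUptodate(); only the freshly copied bytes keep their data. |
| | */ |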
| 2242 | |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 2243 | static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2244 | { |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2245 | struct file *file = iocb->ki_filp; |
| 2246 | struct inode *inode = file_inode(file); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2247 | struct address_space *mapping = inode->i_mapping; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2248 | pgoff_t index; |
| 2249 | unsigned long offset; |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 2250 | enum sgp_type sgp = SGP_READ; |
Geert Uytterhoeven | f7c1d07 | 2014-04-13 20:46:22 +0200 | [diff] [blame] | 2251 | int error = 0; |
Al Viro | cb66a7a | 2014-03-04 15:24:06 -0500 | [diff] [blame] | 2252 | ssize_t retval = 0; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2253 | loff_t *ppos = &iocb->ki_pos; |
Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 2254 | |
| 2255 | /* |
| 2256 | * Might this read be for a stacking filesystem? Then when reading |
| 2257 | * holes of a sparse file, we actually need to allocate those pages, |
| 2258 | * and even mark them dirty, so it cannot exceed the max_blocks limit. |
| 2259 | */ |
Al Viro | 777eda2 | 2014-12-17 04:46:46 -0500 | [diff] [blame] | 2260 | if (!iter_is_iovec(to)) |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 2261 | sgp = SGP_CACHE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2262 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2263 | index = *ppos >> PAGE_SHIFT; |
| 2264 | offset = *ppos & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2265 | |
| 2266 | for (;;) { |
| 2267 | struct page *page = NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2268 | pgoff_t end_index; |
| 2269 | unsigned long nr, ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2270 | loff_t i_size = i_size_read(inode); |
| 2271 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2272 | end_index = i_size >> PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2273 | if (index > end_index) |
| 2274 | break; |
| 2275 | if (index == end_index) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2276 | nr = i_size & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2277 | if (nr <= offset) |
| 2278 | break; |
| 2279 | } |
| 2280 | |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 2281 | error = shmem_getpage(inode, index, &page, sgp); |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2282 | if (error) { |
| 2283 | if (error == -EINVAL) |
| 2284 | error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2285 | break; |
| 2286 | } |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 2287 | if (page) { |
| 2288 | if (sgp == SGP_CACHE) |
| 2289 | set_page_dirty(page); |
Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2290 | unlock_page(page); |
Hugh Dickins | 75edd34 | 2016-05-19 17:12:44 -0700 | [diff] [blame] | 2291 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | |
| 2293 | /* |
| 2294 | * We must evaluate after, since reads (unlike writes) |
Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 2295 | * are called without i_mutex protection against truncate |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2296 | */ |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2297 | nr = PAGE_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | i_size = i_size_read(inode); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2299 | end_index = i_size >> PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | if (index == end_index) { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2301 | nr = i_size & ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2302 | if (nr <= offset) { |
| 2303 | if (page) |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2304 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2305 | break; |
| 2306 | } |
| 2307 | } |
| 2308 | nr -= offset; |
| 2309 | |
| 2310 | if (page) { |
| 2311 | /* |
| 2312 | * If users can be writing to this page using arbitrary |
| 2313 | * virtual addresses, take care about potential aliasing |
| 2314 | * before reading the page on the kernel side. |
| 2315 | */ |
| 2316 | if (mapping_writably_mapped(mapping)) |
| 2317 | flush_dcache_page(page); |
| 2318 | /* |
| 2319 | * Mark the page accessed if we read the beginning. |
| 2320 | */ |
| 2321 | if (!offset) |
| 2322 | mark_page_accessed(page); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 2323 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2324 | page = ZERO_PAGE(0); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2325 | get_page(page); |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 2326 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2327 | |
| 2328 | /* |
| 2329 | * Ok, we have the page, and it's up-to-date, so |
| 2330 | * now we can copy it to user space... |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2331 | */ |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 2332 | ret = copy_page_to_iter(page, offset, nr, to); |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2333 | retval += ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2334 | offset += ret; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2335 | index += offset >> PAGE_SHIFT; |
| 2336 | offset &= ~PAGE_MASK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2337 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2338 | put_page(page); |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 2339 | if (!iov_iter_count(to)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2340 | break; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2341 | if (ret < nr) { |
| 2342 | error = -EFAULT; |
| 2343 | break; |
| 2344 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2345 | cond_resched(); |
| 2346 | } |
| 2347 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2348 | *ppos = ((loff_t) index << PAGE_SHIFT) + offset; |
Al Viro | 6e58e79 | 2014-02-03 17:07:03 -0500 | [diff] [blame] | 2349 | file_accessed(file); |
| 2350 | return retval ? retval : error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | } |
| 2352 | |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2353 | /* |
| 2354 | * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. |
| 2355 | */ |
| 2356 | static pgoff_t shmem_seek_hole_data(struct address_space *mapping, |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2357 | pgoff_t index, pgoff_t end, int whence) |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2358 | { |
| 2359 | struct page *page; |
| 2360 | struct pagevec pvec; |
| 2361 | pgoff_t indices[PAGEVEC_SIZE]; |
| 2362 | bool done = false; |
| 2363 | int i; |
| 2364 | |
| 2365 | pagevec_init(&pvec, 0); |
| 2366 | pvec.nr = 1; /* start small: we may be there already */ |
| 2367 | while (!done) { |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 2368 | pvec.nr = find_get_entries(mapping, index, |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2369 | pvec.nr, pvec.pages, indices); |
| 2370 | if (!pvec.nr) { |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2371 | if (whence == SEEK_DATA) |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2372 | index = end; |
| 2373 | break; |
| 2374 | } |
| 2375 | for (i = 0; i < pvec.nr; i++, index++) { |
| 2376 | if (index < indices[i]) { |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2377 | if (whence == SEEK_HOLE) { |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2378 | done = true; |
| 2379 | break; |
| 2380 | } |
| 2381 | index = indices[i]; |
| 2382 | } |
| 2383 | page = pvec.pages[i]; |
| 2384 | if (page && !radix_tree_exceptional_entry(page)) { |
| 2385 | if (!PageUptodate(page)) |
| 2386 | page = NULL; |
| 2387 | } |
| 2388 | if (index >= end || |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2389 | (page && whence == SEEK_DATA) || |
| 2390 | (!page && whence == SEEK_HOLE)) { |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2391 | done = true; |
| 2392 | break; |
| 2393 | } |
| 2394 | } |
Johannes Weiner | 0cd6144 | 2014-04-03 14:47:46 -0700 | [diff] [blame] | 2395 | pagevec_remove_exceptionals(&pvec); |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2396 | pagevec_release(&pvec); |
| 2397 | pvec.nr = PAGEVEC_SIZE; |
| 2398 | cond_resched(); |
| 2399 | } |
| 2400 | return index; |
| 2401 | } |
| 2402 | |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2403 | static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2404 | { |
| 2405 | struct address_space *mapping = file->f_mapping; |
| 2406 | struct inode *inode = mapping->host; |
| 2407 | pgoff_t start, end; |
| 2408 | loff_t new_offset; |
| 2409 | |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2410 | if (whence != SEEK_DATA && whence != SEEK_HOLE) |
| 2411 | return generic_file_llseek_size(file, offset, whence, |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2412 | MAX_LFS_FILESIZE, i_size_read(inode)); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2413 | inode_lock(inode); |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2414 | /* We're holding i_mutex so we can access i_size directly */ |
| 2415 | |
| 2416 | if (offset < 0) |
| 2417 | offset = -EINVAL; |
| 2418 | else if (offset >= inode->i_size) |
| 2419 | offset = -ENXIO; |
| 2420 | else { |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2421 | start = offset >> PAGE_SHIFT; |
| 2422 | end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2423 | new_offset = shmem_seek_hole_data(mapping, start, end, whence); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2424 | new_offset <<= PAGE_SHIFT; |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2425 | if (new_offset > offset) { |
| 2426 | if (new_offset < inode->i_size) |
| 2427 | offset = new_offset; |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 2428 | else if (whence == SEEK_DATA) |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2429 | offset = -ENXIO; |
| 2430 | else |
| 2431 | offset = inode->i_size; |
| 2432 | } |
| 2433 | } |
| 2434 | |
Hugh Dickins | 387aae6 | 2013-08-04 11:30:25 -0700 | [diff] [blame] | 2435 | if (offset >= 0) |
| 2436 | offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2437 | inode_unlock(inode); |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 2438 | return offset; |
| 2439 | } |
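| | |
| | /* |
| | * Userspace sketch (illustrative) of the SEEK_DATA/SEEK_HOLE support |
| | * above, on a sparse tmpfs file: |
| | * |
| | *	#define _GNU_SOURCE |
| | *	#include <fcntl.h> |
| | *	#include <unistd.h> |
| | *	#include <stdio.h> |
| | * |
| | *	int main(void) |
| | *	{ |
| | *		int fd = open("/dev/shm/sparse", O_RDWR|O_CREAT|O_TRUNC, 0600); |
| | *		ftruncate(fd, 1 << 20);		// 1MiB hole |
| | *		pwrite(fd, "x", 1, 512 << 10);	// one byte of data mid-file |
| | *		off_t data = lseek(fd, 0, SEEK_DATA); |
| | *		off_t hole = lseek(fd, data, SEEK_HOLE); |
| | *		printf("data %lld hole %lld\n", (long long)data, (long long)hole); |
| | *		return 0; |
| | *	} |
| | */ |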
| 2440 | |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2441 | /* |
| 2442 | * We need a tag: a new tag would expand every radix_tree_node by 8 bytes, |
| 2443 | * so reuse a tag which we firmly believe is never set or cleared on shmem. |
| 2444 | */ |
| 2445 | #define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE |
| 2446 | #define LAST_SCAN 4 /* about 150ms max */ |
| 2447 | |
| 2448 | static void shmem_tag_pins(struct address_space *mapping) |
| 2449 | { |
| 2450 | struct radix_tree_iter iter; |
| 2451 | void **slot; |
| 2452 | pgoff_t start; |
| 2453 | struct page *page; |
| 2454 | |
| 2455 | lru_add_drain(); |
| 2456 | start = 0; |
| 2457 | rcu_read_lock(); |
| 2458 | |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2459 | radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { |
| 2460 | page = radix_tree_deref_slot(slot); |
| 2461 | if (!page || radix_tree_exception(page)) { |
Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 2462 | if (radix_tree_deref_retry(page)) { |
| 2463 | slot = radix_tree_iter_retry(&iter); |
| 2464 | continue; |
| 2465 | } |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2466 | } else if (page_count(page) - page_mapcount(page) > 1) { |
| 2467 | spin_lock_irq(&mapping->tree_lock); |
| 2468 | radix_tree_tag_set(&mapping->page_tree, iter.index, |
| 2469 | SHMEM_TAG_PINNED); |
| 2470 | spin_unlock_irq(&mapping->tree_lock); |
| 2471 | } |
| 2472 | |
| 2473 | if (need_resched()) { |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 2474 | slot = radix_tree_iter_resume(slot, &iter); |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2475 | cond_resched_rcu(); |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2476 | } |
| 2477 | } |
| 2478 | rcu_read_unlock(); |
| 2479 | } |
| 2480 | |
| 2481 | /* |
| 2482 | * Setting SEAL_WRITE requires us to verify there's no pending writer. However, |
| 2483 | * via get_user_pages(), drivers might have some pending I/O without any active |
| 2484 | * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages |
| 2485 | * and see whether they have an elevated ref-count. If so, we tag them and wait for |
| 2486 | * them to be dropped. |
| 2487 | * The caller must guarantee that no new user will acquire writable references |
| 2488 | * to those pages to avoid races. |
| 2489 | */ |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2490 | static int shmem_wait_for_pins(struct address_space *mapping) |
| 2491 | { |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2492 | struct radix_tree_iter iter; |
| 2493 | void **slot; |
| 2494 | pgoff_t start; |
| 2495 | struct page *page; |
| 2496 | int error, scan; |
| 2497 | |
| 2498 | shmem_tag_pins(mapping); |
| 2499 | |
| 2500 | error = 0; |
| 2501 | for (scan = 0; scan <= LAST_SCAN; scan++) { |
| 2502 | if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) |
| 2503 | break; |
| 2504 | |
| 2505 | if (!scan) |
| 2506 | lru_add_drain_all(); |
| 2507 | else if (schedule_timeout_killable((HZ << scan) / 200)) |
| 2508 | scan = LAST_SCAN; |
| 2509 | |
| 2510 | start = 0; |
| 2511 | rcu_read_lock(); |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2512 | radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, |
| 2513 | start, SHMEM_TAG_PINNED) { |
| 2514 | |
| 2515 | page = radix_tree_deref_slot(slot); |
| 2516 | if (radix_tree_exception(page)) { |
Matthew Wilcox | 2cf938a | 2016-03-17 14:22:03 -0700 | [diff] [blame] | 2517 | if (radix_tree_deref_retry(page)) { |
| 2518 | slot = radix_tree_iter_retry(&iter); |
| 2519 | continue; |
| 2520 | } |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2521 | |
| 2522 | page = NULL; |
| 2523 | } |
| 2524 | |
| 2525 | if (page && |
| 2526 | page_count(page) - page_mapcount(page) != 1) { |
| 2527 | if (scan < LAST_SCAN) |
| 2528 | goto continue_resched; |
| 2529 | |
| 2530 | /* |
| 2531 | * On the last scan, we clean up all those tags |
| 2532 | * we inserted; but make a note that we still |
| 2533 | * found pages pinned. |
| 2534 | */ |
| 2535 | error = -EBUSY; |
| 2536 | } |
| 2537 | |
| 2538 | spin_lock_irq(&mapping->tree_lock); |
| 2539 | radix_tree_tag_clear(&mapping->page_tree, |
| 2540 | iter.index, SHMEM_TAG_PINNED); |
| 2541 | spin_unlock_irq(&mapping->tree_lock); |
| 2542 | continue_resched: |
| 2543 | if (need_resched()) { |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 2544 | slot = radix_tree_iter_resume(slot, &iter); |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2545 | cond_resched_rcu(); |
David Herrmann | 05f65b5 | 2014-08-08 14:25:36 -0700 | [diff] [blame] | 2546 | } |
| 2547 | } |
| 2548 | rcu_read_unlock(); |
| 2549 | } |
| 2550 | |
| 2551 | return error; |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2552 | } |
| 2553 | |
| 2554 | #define F_ALL_SEALS (F_SEAL_SEAL | \ |
| 2555 | F_SEAL_SHRINK | \ |
| 2556 | F_SEAL_GROW | \ |
| 2557 | F_SEAL_WRITE) |
| 2558 | |
| 2559 | int shmem_add_seals(struct file *file, unsigned int seals) |
| 2560 | { |
| 2561 | struct inode *inode = file_inode(file); |
| 2562 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 2563 | int error; |
| 2564 | |
| 2565 | /* |
| 2566 | * SEALING |
| 2567 | * Sealing allows multiple parties to share a shmem-file but restrict |
| 2568 | * access to a specific subset of file operations. Seals can only be |
| 2569 | * added, but never removed. This way, mutually untrusted parties can |
| 2570 | * share common memory regions with a well-defined policy. A malicious |
| 2571 | * peer can thus never perform unwanted operations on a shared object. |
| 2572 | * |
| 2573 | * Seals are only supported on special shmem-files and always affect |
| 2574 | * the whole underlying inode. Once a seal is set, it may prevent some |
| 2575 | * kinds of access to the file. Currently, the following seals are |
| 2576 | * defined: |
| 2577 | * SEAL_SEAL: Prevent further seals from being set on this file |
| 2578 | * SEAL_SHRINK: Prevent the file from shrinking |
| 2579 | * SEAL_GROW: Prevent the file from growing |
| 2580 | * SEAL_WRITE: Prevent write access to the file |
| 2581 | * |
| 2582 | * As we don't require any trust relationship between two parties, we |
| 2583 | * must prevent seals from being removed. Therefore, sealing a file |
| 2584 | * only adds a given set of seals to the file, it never touches |
| 2585 | * existing seals. Furthermore, the "setting seals"-operation can be |
| 2586 | * sealed itself, which basically prevents any further seal from being |
| 2587 | * added. |
| 2588 | * |
| 2589 | * Semantics of sealing are only defined on volatile files. Only |
| 2590 | * anonymous shmem files support sealing. More importantly, seals are |
| 2591 | * never written to disk. Therefore, there's no plan to support it on |
| 2592 | * other file types. |
| 2593 | */ |
| 2594 | |
| 2595 | if (file->f_op != &shmem_file_operations) |
| 2596 | return -EINVAL; |
| 2597 | if (!(file->f_mode & FMODE_WRITE)) |
| 2598 | return -EPERM; |
| 2599 | if (seals & ~(unsigned int)F_ALL_SEALS) |
| 2600 | return -EINVAL; |
| 2601 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2602 | inode_lock(inode); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2603 | |
| 2604 | if (info->seals & F_SEAL_SEAL) { |
| 2605 | error = -EPERM; |
| 2606 | goto unlock; |
| 2607 | } |
| 2608 | |
| 2609 | if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) { |
| 2610 | error = mapping_deny_writable(file->f_mapping); |
| 2611 | if (error) |
| 2612 | goto unlock; |
| 2613 | |
| 2614 | error = shmem_wait_for_pins(file->f_mapping); |
| 2615 | if (error) { |
| 2616 | mapping_allow_writable(file->f_mapping); |
| 2617 | goto unlock; |
| 2618 | } |
| 2619 | } |
| 2620 | |
| 2621 | info->seals |= seals; |
| 2622 | error = 0; |
| 2623 | |
| 2624 | unlock: |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2625 | inode_unlock(inode); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2626 | return error; |
| 2627 | } |
| 2628 | EXPORT_SYMBOL_GPL(shmem_add_seals); |
| 2629 | |
| 2630 | int shmem_get_seals(struct file *file) |
| 2631 | { |
| 2632 | if (file->f_op != &shmem_file_operations) |
| 2633 | return -EINVAL; |
| 2634 | |
| 2635 | return SHMEM_I(file_inode(file))->seals; |
| 2636 | } |
| 2637 | EXPORT_SYMBOL_GPL(shmem_get_seals); |
| 2638 | |
| 2639 | long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg) |
| 2640 | { |
| 2641 | long error; |
| 2642 | |
| 2643 | switch (cmd) { |
| 2644 | case F_ADD_SEALS: |
| 2645 | /* disallow the upper 32 bits */ |
| 2646 | if (arg > UINT_MAX) |
| 2647 | return -EINVAL; |
| 2648 | |
| 2649 | error = shmem_add_seals(file, arg); |
| 2650 | break; |
| 2651 | case F_GET_SEALS: |
| 2652 | error = shmem_get_seals(file); |
| 2653 | break; |
| 2654 | default: |
| 2655 | error = -EINVAL; |
| 2656 | break; |
| 2657 | } |
| 2658 | |
| 2659 | return error; |
| 2660 | } |
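| | |
| | /* |
| | * Userspace sketch (illustrative): sealing an anonymous shmem file |
| | * created with memfd_create(2); the F_ADD_SEALS/F_GET_SEALS commands |
| | * arrive here via fcntl(2): |
| | * |
| | *	#define _GNU_SOURCE |
| | *	#include <sys/mman.h> |
| | *	#include <fcntl.h> |
| | *	#include <unistd.h> |
| | * |
| | *	int fd = memfd_create("buf", MFD_ALLOW_SEALING); |
| | *	ftruncate(fd, 4096); |
| | *	write(fd, "hello", 5); |
| | *	// freeze contents and size, then forbid further sealing |
| | *	fcntl(fd, F_ADD_SEALS, |
| | *	      F_SEAL_WRITE | F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL); |
| | */ |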
| 2661 | |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2662 | static long shmem_fallocate(struct file *file, int mode, loff_t offset, |
| 2663 | loff_t len) |
| 2664 | { |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 2665 | struct inode *inode = file_inode(file); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2666 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2667 | struct shmem_inode_info *info = SHMEM_I(inode); |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2668 | struct shmem_falloc shmem_falloc; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2669 | pgoff_t start, index, end; |
| 2670 | int error; |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2671 | |
Hugh Dickins | 13ace4d | 2014-06-23 13:22:03 -0700 | [diff] [blame] | 2672 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
| 2673 | return -EOPNOTSUPP; |
| 2674 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2675 | inode_lock(inode); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2676 | |
| 2677 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
| 2678 | struct address_space *mapping = file->f_mapping; |
| 2679 | loff_t unmap_start = round_up(offset, PAGE_SIZE); |
| 2680 | loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2681 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2682 | |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2683 | /* protected by i_mutex */ |
| 2684 | if (info->seals & F_SEAL_WRITE) { |
| 2685 | error = -EPERM; |
| 2686 | goto out; |
| 2687 | } |
| 2688 | |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2689 | shmem_falloc.waitq = &shmem_falloc_waitq; |
Hugh Dickins | f00cdc6 | 2014-06-23 13:22:06 -0700 | [diff] [blame] | 2690 | shmem_falloc.start = unmap_start >> PAGE_SHIFT; |
| 2691 | shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; |
| 2692 | spin_lock(&inode->i_lock); |
| 2693 | inode->i_private = &shmem_falloc; |
| 2694 | spin_unlock(&inode->i_lock); |
| 2695 | |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2696 | if ((u64)unmap_end > (u64)unmap_start) |
| 2697 | unmap_mapping_range(mapping, unmap_start, |
| 2698 | 1 + unmap_end - unmap_start, 0); |
| 2699 | shmem_truncate_range(inode, offset, offset + len - 1); |
| 2700 | /* No need to unmap again: hole-punching leaves COWed pages */ |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2701 | |
| 2702 | spin_lock(&inode->i_lock); |
| 2703 | inode->i_private = NULL; |
| 2704 | wake_up_all(&shmem_falloc_waitq); |
Linus Torvalds | 10d20bd | 2016-12-05 12:10:29 -0800 | [diff] [blame] | 2705 | WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list)); |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2706 | spin_unlock(&inode->i_lock); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2707 | error = 0; |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2708 | goto out; |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2709 | } |
| 2710 | |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2711 | /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ |
| 2712 | error = inode_newsize_ok(inode, offset + len); |
| 2713 | if (error) |
| 2714 | goto out; |
| 2715 | |
David Herrmann | 40e041a | 2014-08-08 14:25:27 -0700 | [diff] [blame] | 2716 | if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { |
| 2717 | error = -EPERM; |
| 2718 | goto out; |
| 2719 | } |
| 2720 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2721 | start = offset >> PAGE_SHIFT; |
| 2722 | end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2723 | /* Try to avoid a swapstorm if len is impossible to satisfy */ |
| 2724 | if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { |
| 2725 | error = -ENOSPC; |
| 2726 | goto out; |
| 2727 | } |
| 2728 | |
Hugh Dickins | 8e205f7 | 2014-07-23 14:00:10 -0700 | [diff] [blame] | 2729 | shmem_falloc.waitq = NULL; |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2730 | shmem_falloc.start = start; |
| 2731 | shmem_falloc.next = start; |
| 2732 | shmem_falloc.nr_falloced = 0; |
| 2733 | shmem_falloc.nr_unswapped = 0; |
| 2734 | spin_lock(&inode->i_lock); |
| 2735 | inode->i_private = &shmem_falloc; |
| 2736 | spin_unlock(&inode->i_lock); |
| 2737 | |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2738 | for (index = start; index < end; index++) { |
| 2739 | struct page *page; |
| 2740 | |
| 2741 | /* |
| 2742 | * Good, the fallocate(2) manpage permits EINTR: we may have |
| 2743 | * been interrupted because we are using up too much memory. |
| 2744 | */ |
| 2745 | if (signal_pending(current)) |
| 2746 | error = -EINTR; |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2747 | else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) |
| 2748 | error = -ENOMEM; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2749 | else |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 2750 | error = shmem_getpage(inode, index, &page, SGP_FALLOC); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2751 | if (error) { |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2752 | /* Remove the !PageUptodate pages we added */ |
Hugh Dickins | 7f55656 | 2016-07-10 16:46:32 -0700 | [diff] [blame] | 2753 | if (index > start) { |
| 2754 | shmem_undo_range(inode, |
| 2755 | (loff_t)start << PAGE_SHIFT, |
| 2756 | ((loff_t)index << PAGE_SHIFT) - 1, true); |
| 2757 | } |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2758 | goto undone; |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2759 | } |
| 2760 | |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2761 | /* |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2762 | * Inform shmem_writepage() how far we have reached. |
| 2763 | * No need for lock or barrier: we have the page lock. |
| 2764 | */ |
| 2765 | shmem_falloc.next++; |
| 2766 | if (!PageUptodate(page)) |
| 2767 | shmem_falloc.nr_falloced++; |
| 2768 | |
| 2769 | /* |
Hugh Dickins | 1635f6a | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2770 | * If !PageUptodate, leave it that way so that freeable pages |
| 2771 | * can be recognized if we need to rollback on error later. |
| 2772 | * But set_page_dirty so that memory pressure will swap rather |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2773 | * than free the pages we are allocating (and SGP_CACHE pages |
| 2774 | * might still be clean: we now need to mark those dirty too). |
| 2775 | */ |
| 2776 | set_page_dirty(page); |
| 2777 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2778 | put_page(page); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2779 | cond_resched(); |
| 2780 | } |
| 2781 | |
| 2782 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) |
| 2783 | i_size_write(inode, offset + len); |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2784 | inode->i_ctime = current_time(inode); |
Hugh Dickins | 1aac140 | 2012-05-29 15:06:42 -0700 | [diff] [blame] | 2785 | undone: |
| 2786 | spin_lock(&inode->i_lock); |
| 2787 | inode->i_private = NULL; |
| 2788 | spin_unlock(&inode->i_lock); |
Hugh Dickins | e2d12e2 | 2012-05-29 15:06:41 -0700 | [diff] [blame] | 2789 | out: |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 2790 | inode_unlock(inode); |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 2791 | return error; |
| 2792 | } |
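| | |
| | /* |
| | * Userspace sketch (illustrative) exercising both fallocate modes |
| | * handled above, on a tmpfs file: |
| | * |
| | *	#define _GNU_SOURCE |
| | *	#include <fcntl.h> |
| | *	#include <unistd.h> |
| | * |
| | *	int fd = open("/dev/shm/buf", O_RDWR|O_CREAT, 0600); |
| | *	ftruncate(fd, 1 << 20); |
| | *	fallocate(fd, 0, 0, 512 << 10);		// preallocate 512KiB |
| | *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, |
| | *		  0, 64 << 10);			// punch first 64KiB back out |
| | */ |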
| 2793 | |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 2794 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2795 | { |
David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 2796 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2797 | |
| 2798 | buf->f_type = TMPFS_MAGIC; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 2799 | buf->f_bsize = PAGE_SIZE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2800 | buf->f_namelen = NAME_MAX; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2801 | if (sbinfo->max_blocks) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2802 | buf->f_blocks = sbinfo->max_blocks; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 2803 | buf->f_bavail = |
| 2804 | buf->f_bfree = sbinfo->max_blocks - |
| 2805 | percpu_counter_sum(&sbinfo->used_blocks); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2806 | } |
| 2807 | if (sbinfo->max_inodes) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2808 | buf->f_files = sbinfo->max_inodes; |
| 2809 | buf->f_ffree = sbinfo->free_inodes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2810 | } |
| 2811 | /* else leave those fields 0 like simple_statfs */ |
| 2812 | return 0; |
| 2813 | } |
| 2814 | |
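
shmem_statfs() above fills in block and inode totals only for size-limited mounts, leaving them zero otherwise, like simple_statfs. A short sketch reading those fields from userspace (assumes /dev/shm is tmpfs):

#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs buf;

	if (statfs("/dev/shm", &buf) != 0)
		return 1;
	/* f_type is TMPFS_MAGIC; f_blocks/f_files read 0 on unlimited mounts */
	printf("type=%#lx bsize=%ld blocks=%llu bfree=%llu files=%llu\n",
	       (unsigned long)buf.f_type, (long)buf.f_bsize,
	       (unsigned long long)buf.f_blocks,
	       (unsigned long long)buf.f_bfree,
	       (unsigned long long)buf.f_files);
	return 0;
}
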
| 2815 | /* |
| 2816 | * File creation. Allocate an inode, and we're done. |
| 2817 | */ |
| 2818 | static int |
Al Viro | 1a67aaf | 2011-07-26 01:52:52 -0400 | [diff] [blame] | 2819 | shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2820 | { |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2821 | struct inode *inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2822 | int error = -ENOSPC; |
| 2823 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2824 | inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2825 | if (inode) { |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 2826 | error = simple_acl_create(dir, inode); |
| 2827 | if (error) |
| 2828 | goto out_iput; |
Eric Paris | 2a7dba3 | 2011-02-01 11:05:39 -0500 | [diff] [blame] | 2829 | error = security_inode_init_security(inode, dir, |
Mimi Zohar | 9d8f13b | 2011-06-06 15:29:25 -0400 | [diff] [blame] | 2830 | &dentry->d_name, |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 2831 | shmem_initxattrs, NULL); |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 2832 | if (error && error != -EOPNOTSUPP) |
| 2833 | goto out_iput; |
Mimi Zohar | 37ec43c | 2013-04-14 09:21:47 -0400 | [diff] [blame] | 2834 | |
Al Viro | 718deb6 | 2009-12-16 19:35:36 -0500 | [diff] [blame] | 2835 | error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2836 | dir->i_size += BOGO_DIRENT_SIZE; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2837 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2838 | d_instantiate(dentry, inode); |
| 2839 | dget(dentry); /* Extra count - pin the dentry in core */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2840 | } |
| 2841 | return error; |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 2842 | out_iput: |
| 2843 | iput(inode); |
| 2844 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2845 | } |
| 2846 | |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 2847 | static int |
| 2848 | shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) |
| 2849 | { |
| 2850 | struct inode *inode; |
| 2851 | int error = -ENOSPC; |
| 2852 | |
| 2853 | inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); |
| 2854 | if (inode) { |
| 2855 | error = security_inode_init_security(inode, dir, |
| 2856 | NULL, |
| 2857 | shmem_initxattrs, NULL); |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 2858 | if (error && error != -EOPNOTSUPP) |
| 2859 | goto out_iput; |
| 2860 | error = simple_acl_create(dir, inode); |
| 2861 | if (error) |
| 2862 | goto out_iput; |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 2863 | d_tmpfile(dentry, inode); |
| 2864 | } |
| 2865 | return error; |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 2866 | out_iput: |
| 2867 | iput(inode); |
| 2868 | return error; |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 2869 | } |
| 2870 | |
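
shmem_tmpfile() is reached through open(2) with O_TMPFILE: the inode is created and handed to d_tmpfile() without ever being linked into the directory, so it vanishes on the last close. A sketch, assuming /dev/shm is tmpfs and the kernel supports O_TMPFILE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* unnamed, unlinked tmpfs inode; freed automatically on close */
	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "scratch", 7);
	close(fd);
	return 0;
}
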
Al Viro | 18bb1db | 2011-07-26 01:41:39 -0400 | [diff] [blame] | 2871 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2872 | { |
| 2873 | int error; |
| 2874 | |
| 2875 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) |
| 2876 | return error; |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 2877 | inc_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2878 | return 0; |
| 2879 | } |
| 2880 | |
Al Viro | 4acdaf2 | 2011-07-26 01:42:34 -0400 | [diff] [blame] | 2881 | static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, |
Al Viro | ebfc3b4 | 2012-06-10 18:05:36 -0400 | [diff] [blame] | 2882 | bool excl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2883 | { |
| 2884 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); |
| 2885 | } |
| 2886 | |
| 2887 | /* |
| 2888 | * Link a file. |
| 2889 | */ |
| 2890 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) |
| 2891 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 2892 | struct inode *inode = d_inode(old_dentry); |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2893 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2894 | |
| 2895 | /* |
| 2896 | * No ordinary (disk-based) filesystem counts links as inodes; |
| 2897 | * but each new link needs a new dentry, pinning lowmem, and |
| 2898 | * tmpfs dentries cannot be pruned until they are unlinked. |
| 2899 | */ |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2900 | ret = shmem_reserve_inode(inode->i_sb); |
| 2901 | if (ret) |
| 2902 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2903 | |
| 2904 | dir->i_size += BOGO_DIRENT_SIZE; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2905 | inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 2906 | inc_nlink(inode); |
Al Viro | 7de9c6ee | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 2907 | ihold(inode); /* New dentry reference */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2908 | dget(dentry); /* Extra pinning count for the created dentry */ |
| 2909 | d_instantiate(dentry, inode); |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2910 | out: |
| 2911 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2912 | } |
| 2913 | |
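
The comment in shmem_link() explains why tmpfs, unlike disk filesystems, charges each new hard link against the inode limit via shmem_reserve_inode(). A sketch that makes the charge visible through statfs() (paths are hypothetical; assumes /dev/shm is mounted with an inode limit, as it is by default):

#include <fcntl.h>
#include <stdio.h>
#include <sys/vfs.h>
#include <unistd.h>

int main(void)
{
	struct statfs before, after;
	int fd = open("/dev/shm/link-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	close(fd);
	statfs("/dev/shm", &before);
	link("/dev/shm/link-demo", "/dev/shm/link-demo2");
	statfs("/dev/shm", &after);
	/* each hard link consumes one reserved inode on tmpfs */
	printf("free inodes: %llu -> %llu\n",
	       (unsigned long long)before.f_ffree,
	       (unsigned long long)after.f_ffree);
	unlink("/dev/shm/link-demo2");
	unlink("/dev/shm/link-demo");
	return 0;
}
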
| 2914 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) |
| 2915 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 2916 | struct inode *inode = d_inode(dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2917 | |
Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 2918 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) |
| 2919 | shmem_free_inode(inode->i_sb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2920 | |
| 2921 | dir->i_size -= BOGO_DIRENT_SIZE; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2922 | inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 2923 | drop_nlink(inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2924 | dput(dentry); /* Undo the count from "create" - this does all the work */ |
| 2925 | return 0; |
| 2926 | } |
| 2927 | |
| 2928 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) |
| 2929 | { |
| 2930 | if (!simple_empty(dentry)) |
| 2931 | return -ENOTEMPTY; |
| 2932 | |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 2933 | drop_nlink(d_inode(dentry)); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 2934 | drop_nlink(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2935 | return shmem_unlink(dir, dentry); |
| 2936 | } |
| 2937 | |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 2938 | static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) |
| 2939 | { |
David Howells | e36cb0b | 2015-01-29 12:02:35 +0000 | [diff] [blame] | 2940 | bool old_is_dir = d_is_dir(old_dentry); |
| 2941 | bool new_is_dir = d_is_dir(new_dentry); |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 2942 | |
| 2943 | if (old_dir != new_dir && old_is_dir != new_is_dir) { |
| 2944 | if (old_is_dir) { |
| 2945 | drop_nlink(old_dir); |
| 2946 | inc_nlink(new_dir); |
| 2947 | } else { |
| 2948 | drop_nlink(new_dir); |
| 2949 | inc_nlink(old_dir); |
| 2950 | } |
| 2951 | } |
| 2952 | old_dir->i_ctime = old_dir->i_mtime = |
| 2953 | new_dir->i_ctime = new_dir->i_mtime = |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 2954 | d_inode(old_dentry)->i_ctime = |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 2955 | d_inode(new_dentry)->i_ctime = current_time(old_dir); |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 2956 | |
| 2957 | return 0; |
| 2958 | } |
| 2959 | |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 2960 | static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) |
| 2961 | { |
| 2962 | struct dentry *whiteout; |
| 2963 | int error; |
| 2964 | |
| 2965 | whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); |
| 2966 | if (!whiteout) |
| 2967 | return -ENOMEM; |
| 2968 | |
| 2969 | error = shmem_mknod(old_dir, whiteout, |
| 2970 | S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); |
| 2971 | dput(whiteout); |
| 2972 | if (error) |
| 2973 | return error; |
| 2974 | |
| 2975 | /* |
| 2976 | * Cheat and hash the whiteout while the old dentry is still in |
| 2977 | * place, instead of playing games with FS_RENAME_DOES_D_MOVE. |
| 2978 | * |
| 2979 | * d_lookup() will consistently find one of them at this point; |
| 2980 | * which one it finds does not matter. |
| 2981 | */ |
| 2982 | d_rehash(whiteout); |
| 2983 | return 0; |
| 2984 | } |
| 2985 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2986 | /* |
| 2987 | * The VFS layer already does all the dentry stuff for rename; |
| 2988 | * we just have to decrement the usage count for the target if |
| 2989 | * it exists, so that the VFS layer correctly frees it when it |
| 2990 | * gets overwritten. |
| 2991 | */ |
Miklos Szeredi | 3b69ff5 | 2014-07-23 15:15:33 +0200 | [diff] [blame] | 2992 | static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2993 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 2994 | struct inode *inode = d_inode(old_dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2995 | int they_are_dirs = S_ISDIR(inode->i_mode); |
| 2996 | |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 2997 | if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) |
Miklos Szeredi | 3b69ff5 | 2014-07-23 15:15:33 +0200 | [diff] [blame] | 2998 | return -EINVAL; |
| 2999 | |
Miklos Szeredi | 3745677 | 2014-07-23 15:15:34 +0200 | [diff] [blame] | 3000 | if (flags & RENAME_EXCHANGE) |
| 3001 | return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); |
| 3002 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3003 | if (!simple_empty(new_dentry)) |
| 3004 | return -ENOTEMPTY; |
| 3005 | |
Miklos Szeredi | 46fdb79 | 2014-10-24 00:14:37 +0200 | [diff] [blame] | 3006 | if (flags & RENAME_WHITEOUT) { |
| 3007 | int error; |
| 3008 | |
| 3009 | error = shmem_whiteout(old_dir, old_dentry); |
| 3010 | if (error) |
| 3011 | return error; |
| 3012 | } |
| 3013 | |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3014 | if (d_really_is_positive(new_dentry)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3015 | (void) shmem_unlink(new_dir, new_dentry); |
Miklos Szeredi | b928095 | 2014-09-24 17:56:17 +0200 | [diff] [blame] | 3016 | if (they_are_dirs) { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3017 | drop_nlink(d_inode(new_dentry)); |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3018 | drop_nlink(old_dir); |
Miklos Szeredi | b928095 | 2014-09-24 17:56:17 +0200 | [diff] [blame] | 3019 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3020 | } else if (they_are_dirs) { |
Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 3021 | drop_nlink(old_dir); |
Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 3022 | inc_nlink(new_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3023 | } |
| 3024 | |
| 3025 | old_dir->i_size -= BOGO_DIRENT_SIZE; |
| 3026 | new_dir->i_size += BOGO_DIRENT_SIZE; |
| 3027 | old_dir->i_ctime = old_dir->i_mtime = |
| 3028 | new_dir->i_ctime = new_dir->i_mtime = |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 3029 | inode->i_ctime = current_time(old_dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3030 | return 0; |
| 3031 | } |
| 3032 | |
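
shmem_rename2() above accepts RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT; the exchange case short-circuits into shmem_exchange(), and the whiteout case drops a WHITEOUT_DEV chardev in the old name's place via shmem_whiteout(). A sketch driving the exchange path from userspace (needs glibc 2.28+ for the renameat2() wrapper; both files are hypothetical and must already exist):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* atomically swap the two names; no temporary file needed */
	if (renameat2(AT_FDCWD, "/dev/shm/a",
		      AT_FDCWD, "/dev/shm/b", RENAME_EXCHANGE) != 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}
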
| 3033 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) |
| 3034 | { |
| 3035 | int error; |
| 3036 | int len; |
| 3037 | struct inode *inode; |
Hugh Dickins | 9276aad | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 3038 | struct page *page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3039 | struct shmem_inode_info *info; |
| 3040 | |
| 3041 | len = strlen(symname) + 1; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3042 | if (len > PAGE_SIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3043 | return -ENAMETOOLONG; |
| 3044 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 3045 | inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3046 | if (!inode) |
| 3047 | return -ENOSPC; |
| 3048 | |
Mimi Zohar | 9d8f13b | 2011-06-06 15:29:25 -0400 | [diff] [blame] | 3049 | error = security_inode_init_security(inode, dir, &dentry->d_name, |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3050 | shmem_initxattrs, NULL); |
Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 3051 | if (error) { |
| 3052 | if (error != -EOPNOTSUPP) { |
| 3053 | iput(inode); |
| 3054 | return error; |
| 3055 | } |
| 3056 | error = 0; |
| 3057 | } |
| 3058 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3059 | info = SHMEM_I(inode); |
| 3060 | inode->i_size = len-1; |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3061 | if (len <= SHORT_SYMLINK_LEN) { |
Al Viro | 3ed47db | 2016-01-22 18:08:52 -0500 | [diff] [blame] | 3062 | inode->i_link = kmemdup(symname, len, GFP_KERNEL); |
| 3063 | if (!inode->i_link) { |
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3064 | iput(inode); |
| 3065 | return -ENOMEM; |
| 3066 | } |
| 3067 | inode->i_op = &shmem_short_symlink_operations; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3068 | } else { |
Al Viro | e8ecde2 | 2016-01-14 17:52:59 -0500 | [diff] [blame] | 3069 | inode_nohighmem(inode); |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 3070 | error = shmem_getpage(inode, 0, &page, SGP_WRITE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3071 | if (error) { |
| 3072 | iput(inode); |
| 3073 | return error; |
| 3074 | } |
Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 3075 | inode->i_mapping->a_ops = &shmem_aops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3076 | inode->i_op = &shmem_symlink_inode_operations; |
Al Viro | 21fc61c | 2015-11-17 01:07:57 -0500 | [diff] [blame] | 3077 | memcpy(page_address(page), symname, len); |
Hugh Dickins | ec9516f | 2012-05-29 15:06:39 -0700 | [diff] [blame] | 3078 | SetPageUptodate(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3079 | set_page_dirty(page); |
Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 3080 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3081 | put_page(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3082 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3083 | dir->i_size += BOGO_DIRENT_SIZE; |
Deepa Dinamani | 078cd82 | 2016-09-14 07:48:04 -0700 | [diff] [blame] | 3084 | dir->i_ctime = dir->i_mtime = current_time(dir); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3085 | d_instantiate(dentry, inode); |
| 3086 | dget(dentry); |
| 3087 | return 0; |
| 3088 | } |
| 3089 | |
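
shmem_symlink() stores short targets inline in i_link via kmemdup(), and longer ones in page 0 of the inode's mapping; readlink sees the same result either way. A sketch creating both kinds, on the assumption that a 199-character target exceeds SHORT_SYMLINK_LEN (paths hypothetical):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char target[200], buf[256];
	ssize_t n;

	symlink("short", "/dev/shm/s1");	/* inline i_link copy */
	memset(target, 'x', sizeof(target) - 1);
	target[sizeof(target) - 1] = '\0';
	symlink(target, "/dev/shm/s2");		/* stored in a pagecache page */
	n = readlink("/dev/shm/s2", buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%zd-byte target read back\n", n);
	}
	return 0;
}
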
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3090 | static void shmem_put_link(void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3091 | { |
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3092 | mark_page_accessed(arg); |
| 3093 | put_page(arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3094 | } |
| 3095 | |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 3096 | static const char *shmem_get_link(struct dentry *dentry, |
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3097 | struct inode *inode, |
| 3098 | struct delayed_call *done) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3099 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3100 | struct page *page = NULL; |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 3101 | int error; |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3102 | if (!dentry) { |
| 3103 | page = find_get_page(inode->i_mapping, 0); |
| 3104 | if (!page) |
| 3105 | return ERR_PTR(-ECHILD); |
| 3106 | if (!PageUptodate(page)) { |
| 3107 | put_page(page); |
| 3108 | return ERR_PTR(-ECHILD); |
| 3109 | } |
| 3110 | } else { |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 3111 | error = shmem_getpage(inode, 0, &page, SGP_READ); |
Al Viro | 6a6c990 | 2015-11-17 10:54:32 -0500 | [diff] [blame] | 3112 | if (error) |
| 3113 | return ERR_PTR(error); |
| 3114 | unlock_page(page); |
| 3115 | } |
Al Viro | fceef39 | 2015-12-29 15:58:39 -0500 | [diff] [blame] | 3116 | set_delayed_call(done, shmem_put_link, page); |
Al Viro | 21fc61c | 2015-11-17 01:07:57 -0500 | [diff] [blame] | 3117 | return page_address(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3118 | } |
| 3119 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3120 | #ifdef CONFIG_TMPFS_XATTR |
| 3121 | /* |
| 3122 | * Superblocks without xattr inode operations may get some security.* xattr |
| 3123 | * support from the LSM "for free". As soon as we have any other xattrs |
| 3124 | * like ACLs, we also need to implement the security.* handlers at |
| 3125 | * filesystem level, though. |
| 3126 | */ |
| 3127 | |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3128 | /* |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3129 | * Callback for security_inode_init_security(): acquire the security.* xattrs. |
| 3130 | */ |
| 3131 | static int shmem_initxattrs(struct inode *inode, |
| 3132 | const struct xattr *xattr_array, |
| 3133 | void *fs_info) |
| 3134 | { |
| 3135 | struct shmem_inode_info *info = SHMEM_I(inode); |
| 3136 | const struct xattr *xattr; |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 3137 | struct simple_xattr *new_xattr; |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3138 | size_t len; |
| 3139 | |
| 3140 | for (xattr = xattr_array; xattr->name != NULL; xattr++) { |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 3141 | new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3142 | if (!new_xattr) |
| 3143 | return -ENOMEM; |
| 3144 | |
| 3145 | len = strlen(xattr->name) + 1; |
| 3146 | new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, |
| 3147 | GFP_KERNEL); |
| 3148 | if (!new_xattr->name) { |
| 3149 | kfree(new_xattr); |
| 3150 | return -ENOMEM; |
| 3151 | } |
| 3152 | |
| 3153 | memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, |
| 3154 | XATTR_SECURITY_PREFIX_LEN); |
| 3155 | memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, |
| 3156 | xattr->name, len); |
| 3157 | |
Aristeu Rozanski | 38f3865 | 2012-08-23 16:53:28 -0400 | [diff] [blame] | 3158 | simple_xattr_list_add(&info->xattrs, new_xattr); |
Jarkko Sakkinen | 6d9d88d | 2012-03-21 16:34:05 -0700 | [diff] [blame] | 3159 | } |
| 3160 | |
| 3161 | return 0; |
| 3162 | } |
| 3163 | |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3164 | static int shmem_xattr_handler_get(const struct xattr_handler *handler, |
Al Viro | b296821 | 2016-04-10 20:48:24 -0400 | [diff] [blame] | 3165 | struct dentry *unused, struct inode *inode, |
| 3166 | const char *name, void *buffer, size_t size) |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3167 | { |
Al Viro | b296821 | 2016-04-10 20:48:24 -0400 | [diff] [blame] | 3168 | struct shmem_inode_info *info = SHMEM_I(inode); |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3169 | |
| 3170 | name = xattr_full_name(handler, name); |
| 3171 | return simple_xattr_get(&info->xattrs, name, buffer, size); |
| 3172 | } |
| 3173 | |
| 3174 | static int shmem_xattr_handler_set(const struct xattr_handler *handler, |
Al Viro | 5930122 | 2016-05-27 10:19:30 -0400 | [diff] [blame] | 3175 | struct dentry *unused, struct inode *inode, |
| 3176 | const char *name, const void *value, |
| 3177 | size_t size, int flags) |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3178 | { |
Al Viro | 5930122 | 2016-05-27 10:19:30 -0400 | [diff] [blame] | 3179 | struct shmem_inode_info *info = SHMEM_I(inode); |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3180 | |
| 3181 | name = xattr_full_name(handler, name); |
| 3182 | return simple_xattr_set(&info->xattrs, name, value, size, flags); |
| 3183 | } |
| 3184 | |
| 3185 | static const struct xattr_handler shmem_security_xattr_handler = { |
| 3186 | .prefix = XATTR_SECURITY_PREFIX, |
| 3187 | .get = shmem_xattr_handler_get, |
| 3188 | .set = shmem_xattr_handler_set, |
| 3189 | }; |
| 3190 | |
| 3191 | static const struct xattr_handler shmem_trusted_xattr_handler = { |
| 3192 | .prefix = XATTR_TRUSTED_PREFIX, |
| 3193 | .get = shmem_xattr_handler_get, |
| 3194 | .set = shmem_xattr_handler_set, |
| 3195 | }; |
| 3196 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3197 | static const struct xattr_handler *shmem_xattr_handlers[] = { |
| 3198 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3199 | &posix_acl_access_xattr_handler, |
| 3200 | &posix_acl_default_xattr_handler, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3201 | #endif |
Andreas Gruenbacher | aa7c524 | 2015-12-02 14:44:38 +0100 | [diff] [blame] | 3202 | &shmem_security_xattr_handler, |
| 3203 | &shmem_trusted_xattr_handler, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3204 | NULL |
| 3205 | }; |
| 3206 | |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3207 | static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) |
| 3208 | { |
David Howells | 75c3cfa | 2015-03-17 22:26:12 +0000 | [diff] [blame] | 3209 | struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); |
Andreas Gruenbacher | 786534b | 2015-12-02 14:44:39 +0100 | [diff] [blame] | 3210 | return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3211 | } |
| 3212 | #endif /* CONFIG_TMPFS_XATTR */ |
| 3213 | |
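
The handlers above strip the "trusted." or "security." prefix before dispatch, and xattr_full_name() restores it so simple_xattr_get()/simple_xattr_set() store the full name; shmem_initxattrs() prepends XATTR_SECURITY_PREFIX by hand for the same reason. A sketch exercising a trusted.* attribute (requires CAP_SYS_ADMIN; the file path is hypothetical and must exist):

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[16];
	ssize_t n;

	if (setxattr("/dev/shm/demo", "trusted.demo", "v1", 2, 0) != 0) {
		perror("setxattr");
		return 1;
	}
	n = getxattr("/dev/shm/demo", "trusted.demo", buf, sizeof(buf));
	if (n > 0)
		printf("trusted.demo=%.*s\n", (int)n, buf);
	return 0;
}
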
Hugh Dickins | 69f07ec | 2011-08-03 16:21:26 -0700 | [diff] [blame] | 3214 | static const struct inode_operations shmem_short_symlink_operations = { |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 3215 | .get_link = simple_get_link, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3216 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3217 | .listxattr = shmem_listxattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3218 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3219 | }; |
| 3220 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 3221 | static const struct inode_operations shmem_symlink_inode_operations = { |
Al Viro | 6b25539 | 2015-11-17 10:20:54 -0500 | [diff] [blame] | 3222 | .get_link = shmem_get_link, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3223 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3224 | .listxattr = shmem_listxattr, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3225 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3226 | }; |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3227 | |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3228 | static struct dentry *shmem_get_parent(struct dentry *child) |
| 3229 | { |
| 3230 | return ERR_PTR(-ESTALE); |
| 3231 | } |
| 3232 | |
| 3233 | static int shmem_match(struct inode *ino, void *vfh) |
| 3234 | { |
| 3235 | __u32 *fh = vfh; |
| 3236 | __u64 inum = fh[2]; |
| 3237 | inum = (inum << 32) | fh[1]; |
| 3238 | return ino->i_ino == inum && fh[0] == ino->i_generation; |
| 3239 | } |
| 3240 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3241 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, |
| 3242 | struct fid *fid, int fh_len, int fh_type) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3243 | { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3244 | struct inode *inode; |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3245 | struct dentry *dentry = NULL; |
Hugh Dickins | 35c2a7f | 2012-10-07 20:32:51 -0700 | [diff] [blame] | 3246 | u64 inum; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3247 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3248 | if (fh_len < 3) |
| 3249 | return NULL; |
| 3250 | |
Hugh Dickins | 35c2a7f | 2012-10-07 20:32:51 -0700 | [diff] [blame] | 3251 | inum = fid->raw[2]; |
| 3252 | inum = (inum << 32) | fid->raw[1]; |
| 3253 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3254 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), |
| 3255 | shmem_match, fid->raw); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3256 | if (inode) { |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3257 | dentry = d_find_alias(inode); |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3258 | iput(inode); |
| 3259 | } |
| 3260 | |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3261 | return dentry; |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3262 | } |
| 3263 | |
Al Viro | b0b0382 | 2012-04-02 14:34:06 -0400 | [diff] [blame] | 3264 | static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, |
| 3265 | struct inode *parent) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3266 | { |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 3267 | if (*len < 3) { |
| 3268 | *len = 3; |
Namjae Jeon | 94e07a75 | 2013-02-17 15:48:11 +0900 | [diff] [blame] | 3269 | return FILEID_INVALID; |
Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 3270 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3271 | |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 3272 | if (inode_unhashed(inode)) { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3273 | /* Unfortunately insert_inode_hash is not idempotent, |
| 3274 | * so as we hash inodes here rather than at creation |
| 3275 | * time, we need a lock to ensure we only try |
| 3276 | * to do it once |
| 3277 | */ |
| 3278 | static DEFINE_SPINLOCK(lock); |
| 3279 | spin_lock(&lock); |
Al Viro | 1d3382cb | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 3280 | if (inode_unhashed(inode)) |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3281 | __insert_inode_hash(inode, |
| 3282 | inode->i_ino + inode->i_generation); |
| 3283 | spin_unlock(&lock); |
| 3284 | } |
| 3285 | |
| 3286 | fh[0] = inode->i_generation; |
| 3287 | fh[1] = inode->i_ino; |
| 3288 | fh[2] = ((__u64)inode->i_ino) >> 32; |
| 3289 | |
| 3290 | *len = 3; |
| 3291 | return 1; |
| 3292 | } |
| 3293 | |
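
shmem_encode_fh() packs i_generation into fh[0] and the 64-bit inode number split across fh[1] (low word) and fh[2] (high word); shmem_match() and shmem_fh_to_dentry() reverse the layout. A sketch fetching such a handle from userspace (glibc 2.14+ for name_to_handle_at(); the path is hypothetical and must exist):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fh;
	unsigned int *raw;
	int mount_id;

	fh = malloc(sizeof(*fh) + 3 * sizeof(unsigned int));
	if (!fh)
		return 1;
	fh->handle_bytes = 3 * sizeof(unsigned int);
	if (name_to_handle_at(AT_FDCWD, "/dev/shm/demo", fh, &mount_id, 0)) {
		perror("name_to_handle_at");
		return 1;
	}
	raw = (unsigned int *)fh->f_handle;
	/* raw[0] = i_generation, raw[1]/raw[2] = inode number low/high */
	printf("gen=%u ino=%llu\n", raw[0],
	       ((unsigned long long)raw[2] << 32) | raw[1]);
	free(fh);
	return 0;
}
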
Christoph Hellwig | 3965516 | 2007-10-21 16:42:17 -0700 | [diff] [blame] | 3294 | static const struct export_operations shmem_export_ops = { |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3295 | .get_parent = shmem_get_parent, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3296 | .encode_fh = shmem_encode_fh, |
Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 3297 | .fh_to_dentry = shmem_fh_to_dentry, |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3298 | }; |
| 3299 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3300 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, |
| 3301 | bool remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3302 | { |
| 3303 | char *this_char, *value, *rest; |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3304 | struct mempolicy *mpol = NULL; |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3305 | uid_t uid; |
| 3306 | gid_t gid; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3307 | |
Hugh Dickins | b00dc3a | 2006-02-21 23:49:47 +0000 | [diff] [blame] | 3308 | while (options != NULL) { |
| 3309 | this_char = options; |
| 3310 | for (;;) { |
| 3311 | /* |
| 3312 | * NUL-terminate this option: unfortunately, |
| 3313 | * mount options form a comma-separated list, |
| 3314 | * but mpol's nodelist may also contain commas. |
| 3315 | */ |
| 3316 | options = strchr(options, ','); |
| 3317 | if (options == NULL) |
| 3318 | break; |
| 3319 | options++; |
| 3320 | if (!isdigit(*options)) { |
| 3321 | options[-1] = '\0'; |
| 3322 | break; |
| 3323 | } |
| 3324 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3325 | if (!*this_char) |
| 3326 | continue; |
| 3327 | if ((value = strchr(this_char,'=')) != NULL) { |
| 3328 | *value++ = 0; |
| 3329 | } else { |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 3330 | pr_err("tmpfs: No value for mount option '%s'\n", |
| 3331 | this_char); |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3332 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3333 | } |
| 3334 | |
| 3335 | if (!strcmp(this_char,"size")) { |
| 3336 | unsigned long long size; |
| 3337 | size = memparse(value,&rest); |
| 3338 | if (*rest == '%') { |
| 3339 | size <<= PAGE_SHIFT; |
| 3340 | size *= totalram_pages; |
| 3341 | do_div(size, 100); |
| 3342 | rest++; |
| 3343 | } |
| 3344 | if (*rest) |
| 3345 | goto bad_val; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3346 | sbinfo->max_blocks = |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3347 | DIV_ROUND_UP(size, PAGE_SIZE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3348 | } else if (!strcmp(this_char,"nr_blocks")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3349 | sbinfo->max_blocks = memparse(value, &rest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3350 | if (*rest) |
| 3351 | goto bad_val; |
| 3352 | } else if (!strcmp(this_char,"nr_inodes")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3353 | sbinfo->max_inodes = memparse(value, &rest); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3354 | if (*rest) |
| 3355 | goto bad_val; |
| 3356 | } else if (!strcmp(this_char,"mode")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3357 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3358 | continue; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3359 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3360 | if (*rest) |
| 3361 | goto bad_val; |
| 3362 | } else if (!strcmp(this_char,"uid")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3363 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3364 | continue; |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3365 | uid = simple_strtoul(value, &rest, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3366 | if (*rest) |
| 3367 | goto bad_val; |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3368 | sbinfo->uid = make_kuid(current_user_ns(), uid); |
| 3369 | if (!uid_valid(sbinfo->uid)) |
| 3370 | goto bad_val; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3371 | } else if (!strcmp(this_char,"gid")) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3372 | if (remount) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3373 | continue; |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3374 | gid = simple_strtoul(value, &rest, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3375 | if (*rest) |
| 3376 | goto bad_val; |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3377 | sbinfo->gid = make_kgid(current_user_ns(), gid); |
| 3378 | if (!gid_valid(sbinfo->gid)) |
| 3379 | goto bad_val; |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 3380 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3381 | } else if (!strcmp(this_char, "huge")) { |
| 3382 | int huge; |
| 3383 | huge = shmem_parse_huge(value); |
| 3384 | if (huge < 0) |
| 3385 | goto bad_val; |
| 3386 | if (!has_transparent_hugepage() && |
| 3387 | huge != SHMEM_HUGE_NEVER) |
| 3388 | goto bad_val; |
| 3389 | sbinfo->huge = huge; |
| 3390 | #endif |
| 3391 | #ifdef CONFIG_NUMA |
Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 3392 | } else if (!strcmp(this_char,"mpol")) { |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3393 | mpol_put(mpol); |
| 3394 | mpol = NULL; |
| 3395 | if (mpol_parse_str(value, &mpol)) |
Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 3396 | goto bad_val; |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3397 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3398 | } else { |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 3399 | pr_err("tmpfs: Bad mount option %s\n", this_char); |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3400 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3401 | } |
| 3402 | } |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3403 | sbinfo->mpol = mpol; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3404 | return 0; |
| 3405 | |
| 3406 | bad_val: |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 3407 | pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3408 | value, this_char); |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3409 | error: |
| 3410 | mpol_put(mpol); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3411 | return 1; |
| 3412 | |
| 3413 | } |
| 3414 | |
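
All of the options parsed above arrive as the data string of mount(2); sizes go through memparse(), so k/m/g suffixes work, and a trailing % on size means a fraction of total RAM. A sketch (needs CAP_SYS_ADMIN; the target directory is hypothetical and must exist):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* size=50% is half of RAM; nr_inodes=10k goes through memparse() */
	if (mount("tmpfs", "/mnt/demo", "tmpfs", 0,
		  "size=50%,nr_inodes=10k,mode=700,uid=1000,gid=1000") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
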
| 3415 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) |
| 3416 | { |
| 3417 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3418 | struct shmem_sb_info config = *sbinfo; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3419 | unsigned long inodes; |
| 3420 | int error = -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3421 | |
Greg Thelen | 5f00110 | 2013-02-22 16:36:01 -0800 | [diff] [blame] | 3422 | config.mpol = NULL; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3423 | if (shmem_parse_options(data, &config, true)) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3424 | return error; |
| 3425 | |
| 3426 | spin_lock(&sbinfo->stat_lock); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3427 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; |
Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 3428 | if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3429 | goto out; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3430 | if (config.max_inodes < inodes) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3431 | goto out; |
| 3432 | /* |
Hugh Dickins | 54af6042 | 2011-08-03 16:21:24 -0700 | [diff] [blame] | 3433 | * Those tests disallow limited->unlimited while any are in use; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3434 | * but we must separately disallow unlimited->limited, because |
| 3435 | * in that case we have no record of how much is already in use. |
| 3436 | */ |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3437 | if (config.max_blocks && !sbinfo->max_blocks) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3438 | goto out; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3439 | if (config.max_inodes && !sbinfo->max_inodes) |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3440 | goto out; |
| 3441 | |
| 3442 | error = 0; |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3443 | sbinfo->huge = config.huge; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3444 | sbinfo->max_blocks = config.max_blocks; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3445 | sbinfo->max_inodes = config.max_inodes; |
| 3446 | sbinfo->free_inodes = config.max_inodes - inodes; |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 3447 | |
Greg Thelen | 5f00110 | 2013-02-22 16:36:01 -0800 | [diff] [blame] | 3448 | /* |
| 3449 | * Preserve previous mempolicy unless mpol remount option was specified. |
| 3450 | */ |
| 3451 | if (config.mpol) { |
| 3452 | mpol_put(sbinfo->mpol); |
| 3453 | sbinfo->mpol = config.mpol; /* transfers initial ref */ |
| 3454 | } |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3455 | out: |
| 3456 | spin_unlock(&sbinfo->stat_lock); |
| 3457 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3458 | } |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3459 | |
Al Viro | 34c80b1 | 2011-12-08 21:32:45 -0500 | [diff] [blame] | 3460 | static int shmem_show_options(struct seq_file *seq, struct dentry *root) |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3461 | { |
Al Viro | 34c80b1 | 2011-12-08 21:32:45 -0500 | [diff] [blame] | 3462 | struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3463 | |
| 3464 | if (sbinfo->max_blocks != shmem_default_max_blocks()) |
| 3465 | seq_printf(seq, ",size=%luk", |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3466 | sbinfo->max_blocks << (PAGE_SHIFT - 10)); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3467 | if (sbinfo->max_inodes != shmem_default_max_inodes()) |
| 3468 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); |
| 3469 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) |
Al Viro | 09208d1 | 2011-07-26 03:15:03 -0400 | [diff] [blame] | 3470 | seq_printf(seq, ",mode=%03ho", sbinfo->mode); |
Eric W. Biederman | 8751e03 | 2012-02-07 16:46:12 -0800 | [diff] [blame] | 3471 | if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) |
| 3472 | seq_printf(seq, ",uid=%u", |
| 3473 | from_kuid_munged(&init_user_ns, sbinfo->uid)); |
| 3474 | if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) |
| 3475 | seq_printf(seq, ",gid=%u", |
| 3476 | from_kgid_munged(&init_user_ns, sbinfo->gid)); |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 3477 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3478 | /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ |
| 3479 | if (sbinfo->huge) |
| 3480 | seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); |
| 3481 | #endif |
Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 3482 | shmem_show_mpol(seq, sbinfo->mpol); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3483 | return 0; |
| 3484 | } |
David Herrmann | 9183df2 | 2014-08-08 14:25:29 -0700 | [diff] [blame] | 3485 | |
| 3486 | #define MFD_NAME_PREFIX "memfd:" |
| 3487 | #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) |
| 3488 | #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) |
| 3489 | |
| 3490 | #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING) |
| 3491 | |
| 3492 | SYSCALL_DEFINE2(memfd_create, |
| 3493 | const char __user *, uname, |
| 3494 | unsigned int, flags) |
| 3495 | { |
| 3496 | struct shmem_inode_info *info; |
| 3497 | struct file *file; |
| 3498 | int fd, error; |
| 3499 | char *name; |
| 3500 | long len; |
| 3501 | |
| 3502 | if (flags & ~(unsigned int)MFD_ALL_FLAGS) |
| 3503 | return -EINVAL; |
| 3504 | |
| 3505 | /* length includes terminating zero */ |
| 3506 | len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); |
| 3507 | if (len <= 0) |
| 3508 | return -EFAULT; |
| 3509 | if (len > MFD_NAME_MAX_LEN + 1) |
| 3510 | return -EINVAL; |
| 3511 | |
| 3512 | name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY); |
| 3513 | if (!name) |
| 3514 | return -ENOMEM; |
| 3515 | |
| 3516 | strcpy(name, MFD_NAME_PREFIX); |
| 3517 | if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { |
| 3518 | error = -EFAULT; |
| 3519 | goto err_name; |
| 3520 | } |
| 3521 | |
| 3522 | /* terminating-zero may have changed after strnlen_user() returned */ |
| 3523 | if (name[len + MFD_NAME_PREFIX_LEN - 1]) { |
| 3524 | error = -EFAULT; |
| 3525 | goto err_name; |
| 3526 | } |
| 3527 | |
| 3528 | fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); |
| 3529 | if (fd < 0) { |
| 3530 | error = fd; |
| 3531 | goto err_name; |
| 3532 | } |
| 3533 | |
| 3534 | file = shmem_file_setup(name, 0, VM_NORESERVE); |
| 3535 | if (IS_ERR(file)) { |
| 3536 | error = PTR_ERR(file); |
| 3537 | goto err_fd; |
| 3538 | } |
| 3539 | info = SHMEM_I(file_inode(file)); |
| 3540 | file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; |
| 3541 | file->f_flags |= O_RDWR | O_LARGEFILE; |
| 3542 | if (flags & MFD_ALLOW_SEALING) |
| 3543 | info->seals &= ~F_SEAL_SEAL; |
| 3544 | |
| 3545 | fd_install(fd, file); |
| 3546 | kfree(name); |
| 3547 | return fd; |
| 3548 | |
| 3549 | err_fd: |
| 3550 | put_unused_fd(fd); |
| 3551 | err_name: |
| 3552 | kfree(name); |
| 3553 | return error; |
| 3554 | } |
| 3555 | |
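
The syscall above prefixes the supplied name with MFD_NAME_PREFIX ("memfd:", visible in /proc/<pid>/fd) and clears F_SEAL_SEAL only when MFD_ALLOW_SEALING is passed, so seals can be added later. A sketch (glibc 2.27+ for the memfd_create() wrapper):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0) {
		perror("memfd_create");
		return 1;
	}
	ftruncate(fd, 4096);
	/* allowed because MFD_ALLOW_SEALING dropped F_SEAL_SEAL at creation */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) != 0)
		perror("F_ADD_SEALS");
	printf("seals=%#x\n", fcntl(fd, F_GET_SEALS));
	close(fd);
	return 0;
}
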
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3556 | #endif /* CONFIG_TMPFS */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3557 | |
| 3558 | static void shmem_put_super(struct super_block *sb) |
| 3559 | { |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 3560 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); |
| 3561 | |
| 3562 | percpu_counter_destroy(&sbinfo->used_blocks); |
Greg Thelen | 49cd0a5 | 2013-02-22 16:36:02 -0800 | [diff] [blame] | 3563 | mpol_put(sbinfo->mpol); |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 3564 | kfree(sbinfo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3565 | sb->s_fs_info = NULL; |
| 3566 | } |
| 3567 | |
Kay Sievers | 2b2af54 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 3568 | int shmem_fill_super(struct super_block *sb, void *data, int silent) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3569 | { |
| 3570 | struct inode *inode; |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3571 | struct shmem_sb_info *sbinfo; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3572 | int err = -ENOMEM; |
| 3573 | |
| 3574 | /* Round up to L1_CACHE_BYTES to resist false sharing */ |
Pekka Enberg | 425fbf0 | 2009-09-21 17:03:50 -0700 | [diff] [blame] | 3575 | sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3576 | L1_CACHE_BYTES), GFP_KERNEL); |
| 3577 | if (!sbinfo) |
| 3578 | return -ENOMEM; |
| 3579 | |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3580 | sbinfo->mode = S_IRWXUGO | S_ISVTX; |
David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 3581 | sbinfo->uid = current_fsuid(); |
| 3582 | sbinfo->gid = current_fsgid(); |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3583 | sb->s_fs_info = sbinfo; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3584 | |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3585 | #ifdef CONFIG_TMPFS |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3586 | /* |
| 3587 | * By default we only allow half of the physical RAM per |
| 3588 | * tmpfs instance, limiting inodes to one per page of lowmem; |
| 3589 | * but the internal instance is left unlimited. |
| 3590 | */ |
Al Viro | ca4e051 | 2013-08-31 12:57:10 -0400 | [diff] [blame] | 3591 | if (!(sb->s_flags & MS_KERNMOUNT)) { |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3592 | sbinfo->max_blocks = shmem_default_max_blocks(); |
| 3593 | sbinfo->max_inodes = shmem_default_max_inodes(); |
| 3594 | if (shmem_parse_options(data, sbinfo, false)) { |
| 3595 | err = -EINVAL; |
| 3596 | goto failed; |
| 3597 | } |
Al Viro | ca4e051 | 2013-08-31 12:57:10 -0400 | [diff] [blame] | 3598 | } else { |
| 3599 | sb->s_flags |= MS_NOUSER; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3600 | } |
David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 3601 | sb->s_export_op = &shmem_export_ops; |
Hugh Dickins | 2f6e38f | 2012-05-29 15:06:38 -0700 | [diff] [blame] | 3602 | sb->s_flags |= MS_NOSEC; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3603 | #else |
| 3604 | sb->s_flags |= MS_NOUSER; |
| 3605 | #endif |
| 3606 | |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3607 | spin_lock_init(&sbinfo->stat_lock); |
Tejun Heo | 908c7f1 | 2014-09-08 09:51:29 +0900 | [diff] [blame] | 3608 | if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) |
Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 3609 | goto failed; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3610 | sbinfo->free_inodes = sbinfo->max_inodes; |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 3611 | spin_lock_init(&sbinfo->shrinklist_lock); |
| 3612 | INIT_LIST_HEAD(&sbinfo->shrinklist); |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3613 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 3614 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 3615 | sb->s_blocksize = PAGE_SIZE; |
| 3616 | sb->s_blocksize_bits = PAGE_SHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3617 | sb->s_magic = TMPFS_MAGIC; |
| 3618 | sb->s_op = &shmem_ops; |
Robin H. Johnson | cfd95a9 | 2006-06-12 21:50:25 +0100 | [diff] [blame] | 3619 | sb->s_time_gran = 1; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3620 | #ifdef CONFIG_TMPFS_XATTR |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3621 | sb->s_xattr = shmem_xattr_handlers; |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3622 | #endif |
| 3623 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3624 | sb->s_flags |= MS_POSIXACL; |
| 3625 | #endif |
Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 3626 | |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 3627 | inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3628 | if (!inode) |
| 3629 | goto failed; |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3630 | inode->i_uid = sbinfo->uid; |
| 3631 | inode->i_gid = sbinfo->gid; |
Al Viro | 318ceed | 2012-02-12 22:08:01 -0500 | [diff] [blame] | 3632 | sb->s_root = d_make_root(inode); |
| 3633 | if (!sb->s_root) |
Al Viro | 48fde70 | 2012-01-08 22:15:13 -0500 | [diff] [blame] | 3634 | goto failed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3635 | return 0; |
| 3636 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3637 | failed: |
| 3638 | shmem_put_super(sb); |
| 3639 | return err; |
| 3640 | } |
| 3641 | |
Pekka Enberg | fcc234f | 2006-03-22 00:08:13 -0800 | [diff] [blame] | 3642 | static struct kmem_cache *shmem_inode_cachep; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3643 | |
| 3644 | static struct inode *shmem_alloc_inode(struct super_block *sb) |
| 3645 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3646 | struct shmem_inode_info *info; |
| 3647 | info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); |
| 3648 | if (!info) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3649 | return NULL; |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3650 | return &info->vfs_inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3651 | } |
| 3652 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3653 | static void shmem_destroy_callback(struct rcu_head *head) |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 3654 | { |
| 3655 | struct inode *inode = container_of(head, struct inode, i_rcu); |
Al Viro | 84e710d | 2016-04-15 00:58:55 -0400 | [diff] [blame] | 3656 | if (S_ISLNK(inode->i_mode)) |
| 3657 | kfree(inode->i_link); |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 3658 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); |
| 3659 | } |
| 3660 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3661 | static void shmem_destroy_inode(struct inode *inode) |
| 3662 | { |
Al Viro | 09208d1 | 2011-07-26 03:15:03 -0400 | [diff] [blame] | 3663 | if (S_ISREG(inode->i_mode)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3664 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3665 | call_rcu(&inode->i_rcu, shmem_destroy_callback); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3666 | } |
| 3667 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3668 | static void shmem_init_inode(void *foo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3669 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3670 | struct shmem_inode_info *info = foo; |
| 3671 | inode_init_once(&info->vfs_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3672 | } |
| 3673 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3674 | static int shmem_init_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3675 | { |
| 3676 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", |
| 3677 | sizeof(struct shmem_inode_info), |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 3678 | 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3679 | return 0; |
| 3680 | } |
| 3681 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3682 | static void shmem_destroy_inodecache(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3683 | { |
Alexey Dobriyan | 1a1d92c | 2006-09-27 01:49:40 -0700 | [diff] [blame] | 3684 | kmem_cache_destroy(shmem_inode_cachep); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3685 | } |
| 3686 | |
Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 3687 | static const struct address_space_operations shmem_aops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3688 | .writepage = shmem_writepage, |
Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 3689 | .set_page_dirty = __set_page_dirty_no_writeback, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3690 | #ifdef CONFIG_TMPFS |
Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 3691 | .write_begin = shmem_write_begin, |
| 3692 | .write_end = shmem_write_end, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3693 | #endif |
Andrew Morton | 1c93923 | 2014-10-09 15:27:59 -0700 | [diff] [blame] | 3694 | #ifdef CONFIG_MIGRATION |
Lee Schermerhorn | 304dbdb | 2006-04-22 02:35:48 -0700 | [diff] [blame] | 3695 | .migratepage = migrate_page, |
Andrew Morton | 1c93923 | 2014-10-09 15:27:59 -0700 | [diff] [blame] | 3696 | #endif |
Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 3697 | .error_remove_page = generic_error_remove_page, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3698 | }; |
| 3699 | |
Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 3700 | static const struct file_operations shmem_file_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3701 | .mmap = shmem_mmap, |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 3702 | .get_unmapped_area = shmem_get_unmapped_area, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3703 | #ifdef CONFIG_TMPFS |
Hugh Dickins | 220f2ac | 2012-12-12 13:52:21 -0800 | [diff] [blame] | 3704 | .llseek = shmem_file_llseek, |
Al Viro | 2ba5bbe | 2014-04-02 20:00:02 -0400 | [diff] [blame] | 3705 | .read_iter = shmem_file_read_iter, |
Al Viro | 8174202 | 2014-04-03 03:17:43 -0400 | [diff] [blame] | 3706 | .write_iter = generic_file_write_iter, |
Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 3707 | .fsync = noop_fsync, |
Al Viro | 82c156f | 2016-09-22 23:35:42 -0400 | [diff] [blame] | 3708 | .splice_read = generic_file_splice_read, |
Al Viro | f6cb85d | 2014-04-05 04:38:56 -0400 | [diff] [blame] | 3709 | .splice_write = iter_file_splice_write, |
Hugh Dickins | 83e4fa9 | 2012-05-29 15:06:40 -0700 | [diff] [blame] | 3710 | .fallocate = shmem_fallocate, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3711 | #endif |
| 3712 | }; |
| 3713 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 3714 | static const struct inode_operations shmem_inode_operations = { |
Yu Zhao | 44a3022 | 2015-09-08 15:03:33 -0700 | [diff] [blame] | 3715 | .getattr = shmem_getattr, |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 3716 | .setattr = shmem_setattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3717 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3718 | .listxattr = shmem_listxattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3719 | .set_acl = simple_set_acl, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3720 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3721 | }; |
| 3722 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 3723 | static const struct inode_operations shmem_dir_inode_operations = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3724 | #ifdef CONFIG_TMPFS |
| 3725 | .create = shmem_create, |
| 3726 | .lookup = simple_lookup, |
| 3727 | .link = shmem_link, |
| 3728 | .unlink = shmem_unlink, |
| 3729 | .symlink = shmem_symlink, |
| 3730 | .mkdir = shmem_mkdir, |
| 3731 | .rmdir = shmem_rmdir, |
| 3732 | .mknod = shmem_mknod, |
Miklos Szeredi | 2773bf0 | 2016-09-27 11:03:58 +0200 | [diff] [blame] | 3733 | .rename = shmem_rename2, |
Al Viro | 60545d0 | 2013-06-07 01:20:27 -0400 | [diff] [blame] | 3734 | .tmpfile = shmem_tmpfile, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3735 | #endif |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3736 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3737 | .listxattr = shmem_listxattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3738 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3739 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 3740 | .setattr = shmem_setattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3741 | .set_acl = simple_set_acl, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3742 | #endif |
| 3743 | }; |
| 3744 | |
Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 3745 | static const struct inode_operations shmem_special_inode_operations = { |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3746 | #ifdef CONFIG_TMPFS_XATTR |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3747 | .listxattr = shmem_listxattr, |
Eric Paris | b09e0fa | 2011-05-24 17:12:39 -0700 | [diff] [blame] | 3748 | #endif |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3749 | #ifdef CONFIG_TMPFS_POSIX_ACL |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 3750 | .setattr = shmem_setattr, |
Christoph Hellwig | feda821 | 2013-12-20 05:16:54 -0800 | [diff] [blame] | 3751 | .set_acl = simple_set_acl, |
Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 3752 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3753 | }; |
| 3754 | |
Hugh Dickins | 759b977 | 2007-03-05 00:30:28 -0800 | [diff] [blame] | 3755 | static const struct super_operations shmem_ops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3756 | .alloc_inode = shmem_alloc_inode, |
| 3757 | .destroy_inode = shmem_destroy_inode, |
| 3758 | #ifdef CONFIG_TMPFS |
| 3759 | .statfs = shmem_statfs, |
| 3760 | .remount_fs = shmem_remount_fs, |
akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 3761 | .show_options = shmem_show_options, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3762 | #endif |
Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 3763 | .evict_inode = shmem_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3764 | .drop_inode = generic_delete_inode, |
| 3765 | .put_super = shmem_put_super, |
Kirill A. Shutemov | 779750d | 2016-07-26 15:26:38 -0700 | [diff] [blame] | 3766 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE |
| 3767 | .nr_cached_objects = shmem_unused_huge_count, |
| 3768 | .free_cached_objects = shmem_unused_huge_scan, |
| 3769 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3770 | }; |
| 3771 | |
Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 3772 | static const struct vm_operations_struct shmem_vm_ops = { |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3773 | .fault = shmem_fault, |
Ning Qu | d7c1755 | 2014-04-07 15:37:24 -0700 | [diff] [blame] | 3774 | .map_pages = filemap_map_pages, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3775 | #ifdef CONFIG_NUMA |
| 3776 | .set_policy = shmem_set_policy, |
| 3777 | .get_policy = shmem_get_policy, |
| 3778 | #endif |
| 3779 | }; |
| 3780 | |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 3781 | static struct dentry *shmem_mount(struct file_system_type *fs_type, |
| 3782 | int flags, const char *dev_name, void *data) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3783 | { |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 3784 | return mount_nodev(fs_type, flags, data, shmem_fill_super); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3785 | } |
| 3786 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3787 | static struct file_system_type shmem_fs_type = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3788 | .owner = THIS_MODULE, |
| 3789 | .name = "tmpfs", |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 3790 | .mount = shmem_mount, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | .kill_sb = kill_litter_super, |
Eric W. Biederman | 2b8576c | 2013-01-25 16:32:10 -0800 | [diff] [blame] | 3792 | .fs_flags = FS_USERNS_MOUNT, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3793 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3794 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3795 | int __init shmem_init(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3796 | { |
| 3797 | int error; |
| 3798 | |
Rob Landley | 16203a7 | 2013-09-11 14:26:12 -0700 | [diff] [blame] | 3799 | /* If rootfs called this, don't re-init */ |
| 3800 | if (shmem_inode_cachep) |
| 3801 | return 0; |
| 3802 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3803 | error = shmem_init_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3804 | if (error) |
| 3805 | goto out3; |
| 3806 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3807 | error = register_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3808 | if (error) { |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 3809 | pr_err("Could not register tmpfs\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3810 | goto out2; |
| 3811 | } |
Greg Kroah-Hartman | 95dc112 | 2005-06-20 21:15:16 -0700 | [diff] [blame] | 3812 | |
Al Viro | ca4e051 | 2013-08-31 12:57:10 -0400 | [diff] [blame] | 3813 | shm_mnt = kern_mount(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3814 | if (IS_ERR(shm_mnt)) { |
| 3815 | error = PTR_ERR(shm_mnt); |
Joe Perches | 1170532 | 2016-03-17 14:19:50 -0700 | [diff] [blame] | 3816 | pr_err("Could not kern_mount tmpfs\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3817 | goto out1; |
| 3818 | } |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3819 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 3820 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3821 | if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) |
| 3822 | SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; |
| 3823 | else |
| 3824 | shmem_huge = 0; /* just in case it was patched */ |
| 3825 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3826 | return 0; |
| 3827 | |
| 3828 | out1: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3829 | unregister_filesystem(&shmem_fs_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3830 | out2: |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3831 | shmem_destroy_inodecache(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3832 | out3: |
| 3833 | shm_mnt = ERR_PTR(error); |
| 3834 | return error; |
| 3835 | } |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3836 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 3837 | #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3838 | static ssize_t shmem_enabled_show(struct kobject *kobj, |
| 3839 | struct kobj_attribute *attr, char *buf) |
| 3840 | { |
| 3841 | int values[] = { |
| 3842 | SHMEM_HUGE_ALWAYS, |
| 3843 | SHMEM_HUGE_WITHIN_SIZE, |
| 3844 | SHMEM_HUGE_ADVISE, |
| 3845 | SHMEM_HUGE_NEVER, |
| 3846 | SHMEM_HUGE_DENY, |
| 3847 | SHMEM_HUGE_FORCE, |
| 3848 | }; |
| 3849 | int i, count; |
| 3850 | |
| 3851 | for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { |
| 3852 | const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s "; |
| 3853 | |
| 3854 | count += sprintf(buf + count, fmt, |
| 3855 | shmem_format_huge(values[i])); |
| 3856 | } |
| 3857 | buf[count - 1] = '\n'; |
| 3858 | return count; |
| 3859 | } |
| 3860 | |
| 3861 | static ssize_t shmem_enabled_store(struct kobject *kobj, |
| 3862 | struct kobj_attribute *attr, const char *buf, size_t count) |
| 3863 | { |
| 3864 | char tmp[16]; |
| 3865 | int huge; |
| 3866 | |
| 3867 | if (count + 1 > sizeof(tmp)) |
| 3868 | return -EINVAL; |
| 3869 | memcpy(tmp, buf, count); |
| 3870 | tmp[count] = '\0'; |
| 3871 | if (count && tmp[count - 1] == '\n') |
| 3872 | tmp[count - 1] = '\0'; |
| 3873 | |
| 3874 | huge = shmem_parse_huge(tmp); |
| 3875 | if (huge == -EINVAL) |
| 3876 | return -EINVAL; |
| 3877 | if (!has_transparent_hugepage() && |
| 3878 | huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) |
| 3879 | return -EINVAL; |
| 3880 | |
| 3881 | shmem_huge = huge; |
| 3882 | if (shmem_huge < SHMEM_HUGE_DENY) |
| 3883 | SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; |
| 3884 | return count; |
| 3885 | } |
| 3886 | |
| 3887 | struct kobj_attribute shmem_enabled_attr = |
| 3888 | __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); |
Arnd Bergmann | 3b33719 | 2016-08-10 16:27:44 -0700 | [diff] [blame] | 3889 | #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ |
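/*
 * Usage sketch (not part of this file): shmem_enabled_attr above is
 * conventionally exposed by huge_memory.c as
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled, where userspace can
 * query and set the policy parsed by shmem_enabled_store().  The path and
 * the small userspace program below are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/mm/transparent_hugepage/shmem_enabled";
	char line[128];
	FILE *f = fopen(path, "r+");	/* writing requires root */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("current: %s", line);	/* active choice shown in brackets */
	rewind(f);				/* reposition before switching to writes */
	if (fputs("within_size", f) == EOF)	/* one of the keywords listed above */
		perror("fputs");
	fclose(f);
	return 0;
}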
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 3890 | |
Arnd Bergmann | 3b33719 | 2016-08-10 16:27:44 -0700 | [diff] [blame] | 3891 | #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 3892 | bool shmem_huge_enabled(struct vm_area_struct *vma) |
| 3893 | { |
| 3894 | struct inode *inode = file_inode(vma->vm_file); |
| 3895 | struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); |
| 3896 | loff_t i_size; |
| 3897 | pgoff_t off; |
| 3898 | |
| 3899 | if (shmem_huge == SHMEM_HUGE_FORCE) |
| 3900 | return true; |
| 3901 | if (shmem_huge == SHMEM_HUGE_DENY) |
| 3902 | return false; |
| 3903 | switch (sbinfo->huge) { |
| 3904 | case SHMEM_HUGE_NEVER: |
| 3905 | return false; |
| 3906 | case SHMEM_HUGE_ALWAYS: |
| 3907 | return true; |
| 3908 | case SHMEM_HUGE_WITHIN_SIZE: |
| 3909 | off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); |
| 3910 | i_size = round_up(i_size_read(inode), PAGE_SIZE); |
| 3911 | if (i_size >= HPAGE_PMD_SIZE && |
| 3912 | i_size >> PAGE_SHIFT >= off) |
| 3913 | return true; |
| | /* fall through */ |
| 3914 | case SHMEM_HUGE_ADVISE: |
| 3915 | /* TODO: implement fadvise() hints */ |
| 3916 | return (vma->vm_flags & VM_HUGEPAGE); |
| 3917 | default: |
| 3918 | VM_BUG_ON(1); |
| 3919 | return false; |
| 3920 | } |
| 3921 | } |
Arnd Bergmann | 3b33719 | 2016-08-10 16:27:44 -0700 | [diff] [blame] | 3922 | #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ |
Kirill A. Shutemov | 5a6e75f | 2016-07-26 15:26:13 -0700 | [diff] [blame] | 3923 | |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3924 | #else /* !CONFIG_SHMEM */ |
| 3925 | |
| 3926 | /* |
| 3927 | * tiny-shmem: simple shmemfs and tmpfs using ramfs code |
| 3928 | * |
| 3929 | * This is intended for small systems where the benefits of the full |
| 3930 | * shmem code (swap-backed and resource-limited) are outweighed by |
| 3931 | * their complexity. On systems without swap this code should be |
| 3932 | * effectively equivalent, but much lighter weight. |
| 3933 | */ |
| 3934 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3935 | static struct file_system_type shmem_fs_type = { |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3936 | .name = "tmpfs", |
Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 3937 | .mount = ramfs_mount, |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3938 | .kill_sb = kill_litter_super, |
Eric W. Biederman | 2b8576c | 2013-01-25 16:32:10 -0800 | [diff] [blame] | 3939 | .fs_flags = FS_USERNS_MOUNT, |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3940 | }; |
| 3941 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3942 | int __init shmem_init(void) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3943 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3944 | BUG_ON(register_filesystem(&shmem_fs_type) != 0); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3945 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3946 | shm_mnt = kern_mount(&shmem_fs_type); |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3947 | BUG_ON(IS_ERR(shm_mnt)); |
| 3948 | |
| 3949 | return 0; |
| 3950 | } |
| 3951 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3952 | int shmem_unuse(swp_entry_t swap, struct page *page) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3953 | { |
| 3954 | return 0; |
| 3955 | } |
| 3956 | |
Hugh Dickins | 3f96b79 | 2009-09-21 17:03:37 -0700 | [diff] [blame] | 3957 | int shmem_lock(struct file *file, int lock, struct user_struct *user) |
| 3958 | { |
| 3959 | return 0; |
| 3960 | } |
| 3961 | |
Hugh Dickins | 2451326 | 2012-01-20 14:34:21 -0800 | [diff] [blame] | 3962 | void shmem_unlock_mapping(struct address_space *mapping) |
| 3963 | { |
| 3964 | } |
| 3965 | |
Hugh Dickins | c01d5b3 | 2016-07-26 15:26:15 -0700 | [diff] [blame] | 3966 | #ifdef CONFIG_MMU |
| 3967 | unsigned long shmem_get_unmapped_area(struct file *file, |
| 3968 | unsigned long addr, unsigned long len, |
| 3969 | unsigned long pgoff, unsigned long flags) |
| 3970 | { |
| 3971 | return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); |
| 3972 | } |
| 3973 | #endif |
| 3974 | |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3975 | void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 3976 | { |
Hugh Dickins | 41ffe5d | 2011-08-03 16:21:21 -0700 | [diff] [blame] | 3977 | truncate_inode_pages_range(inode->i_mapping, lstart, lend); |
Hugh Dickins | 94c1e62 | 2011-06-27 16:18:03 -0700 | [diff] [blame] | 3978 | } |
| 3979 | EXPORT_SYMBOL_GPL(shmem_truncate_range); |
| 3980 | |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 3981 | #define shmem_vm_ops generic_file_vm_ops |
| 3982 | #define shmem_file_operations ramfs_file_operations |
Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 3983 | #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) |
Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 3984 | #define shmem_acct_size(flags, size) 0 |
| 3985 | #define shmem_unacct_size(flags, size) do {} while (0) |
Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 3986 | |
| 3987 | #endif /* CONFIG_SHMEM */ |
| 3988 | |
| 3989 | /* common code */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3990 | |
Rasmus Villemoes | 19938e3 | 2016-10-07 17:01:01 -0700 | [diff] [blame] | 3991 | static const struct dentry_operations anon_ops = { |
Al Viro | 118b230 | 2013-08-24 12:08:17 -0400 | [diff] [blame] | 3992 | .d_dname = simple_dname |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 3993 | }; |
| 3994 | |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 3995 | static struct file *__shmem_file_setup(const char *name, loff_t size, |
| 3996 | unsigned long flags, unsigned int i_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3997 | { |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 3998 | struct file *res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3999 | struct inode *inode; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 4000 | struct path path; |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 4001 | struct super_block *sb; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4002 | struct qstr this; |
| 4003 | |
| 4004 | if (IS_ERR(shm_mnt)) |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4005 | return ERR_CAST(shm_mnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4006 | |
Hugh Dickins | 285b2c4 | 2011-08-03 16:21:20 -0700 | [diff] [blame] | 4007 | if (size < 0 || size > MAX_LFS_FILESIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4008 | return ERR_PTR(-EINVAL); |
| 4009 | |
| 4010 | if (shmem_acct_size(flags, size)) |
| 4011 | return ERR_PTR(-ENOMEM); |
| 4012 | |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4013 | res = ERR_PTR(-ENOMEM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4014 | this.name = name; |
| 4015 | this.len = strlen(name); |
| 4016 | this.hash = 0; /* will go */ |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 4017 | sb = shm_mnt->mnt_sb; |
Konstantin Khlebnikov | 66ee4b8 | 2014-08-06 16:06:32 -0700 | [diff] [blame] | 4018 | path.mnt = mntget(shm_mnt); |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 4019 | path.dentry = d_alloc_pseudo(sb, &this); |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 4020 | if (!path.dentry) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4021 | goto put_memory; |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 4022 | d_set_d_op(path.dentry, &anon_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4023 | |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4024 | res = ERR_PTR(-ENOSPC); |
Al Viro | 3451538 | 2013-02-14 22:38:02 -0500 | [diff] [blame] | 4025 | inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4026 | if (!inode) |
Konstantin Khlebnikov | 66ee4b8 | 2014-08-06 16:06:32 -0700 | [diff] [blame] | 4027 | goto put_memory; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4028 | |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 4029 | inode->i_flags |= i_flags; |
Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 4030 | d_instantiate(path.dentry, inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4031 | inode->i_size = size; |
Miklos Szeredi | 6d6b77f | 2011-10-28 14:13:28 +0200 | [diff] [blame] | 4032 | clear_nlink(inode); /* It is unlinked */ |
Al Viro | 26567cd | 2013-03-01 20:22:53 -0500 | [diff] [blame] | 4033 | res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); |
| 4034 | if (IS_ERR(res)) |
Konstantin Khlebnikov | 66ee4b8 | 2014-08-06 16:06:32 -0700 | [diff] [blame] | 4035 | goto put_path; |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 4036 | |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4037 | res = alloc_file(&path, FMODE_WRITE | FMODE_READ, |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 4038 | &shmem_file_operations); |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4039 | if (IS_ERR(res)) |
Konstantin Khlebnikov | 66ee4b8 | 2014-08-06 16:06:32 -0700 | [diff] [blame] | 4040 | goto put_path; |
Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 4041 | |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4042 | return res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4043 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4044 | put_memory: |
| 4045 | shmem_unacct_size(flags, size); |
Konstantin Khlebnikov | 66ee4b8 | 2014-08-06 16:06:32 -0700 | [diff] [blame] | 4046 | put_path: |
| 4047 | path_put(&path); |
Al Viro | 6b4d0b2 | 2013-02-14 21:37:26 -0500 | [diff] [blame] | 4048 | return res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4049 | } |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 4050 | |
| 4051 | /** |
| 4052 | * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be |
| 4053 | * kernel internal. There will be NO LSM permission checks against the |
| 4054 | * underlying inode. So users of this interface must do LSM checks at a |
Stephen Smalley | e1832f2 | 2015-08-06 15:46:55 -0700 | [diff] [blame] | 4055 | * higher layer. The users are the big_key and shm implementations. LSM |
| 4056 | * checks are provided at the key or shm level rather than the inode. |
Eric Paris | c727709 | 2013-12-02 11:24:19 +0000 | [diff] [blame] | 4057 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 4058 | * @size: size to be set for the file |
| 4059 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
| 4060 | */ |
| 4061 | struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) |
| 4062 | { |
| 4063 | return __shmem_file_setup(name, size, flags, S_PRIVATE); |
| 4064 | } |
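/*
 * Calling-pattern sketch (hypothetical caller, not from this file):
 * big_key-style users create a kernel-private backing file and then
 * enforce their own access checks, since S_PRIVATE suppresses LSM
 * checks on the inode.
 */
static struct file *example_alloc_store(loff_t datalen)
{
	struct file *file;

	file = shmem_kernel_file_setup("example", datalen, 0);
	if (IS_ERR(file))
		return file;	/* ERR_PTR(-ENOMEM), ERR_PTR(-ENOSPC), ... */

	/* all permission checking now happens at the caller's own level */
	return file;
}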
| 4065 | |
| 4066 | /** |
| 4067 | * shmem_file_setup - get an unlinked file living in tmpfs |
| 4068 | * @name: name for dentry (to be seen in /proc/<pid>/maps) |
| 4069 | * @size: size to be set for the file |
| 4070 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size |
| 4071 | */ |
| 4072 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) |
| 4073 | { |
| 4074 | return __shmem_file_setup(name, size, flags, 0); |
| 4075 | } |
Keith Packard | 395e0dd | 2008-06-20 00:08:06 -0700 | [diff] [blame] | 4076 | EXPORT_SYMBOL_GPL(shmem_file_setup); |
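/*
 * Sketch of the exported interface (function name hypothetical): a
 * driver or ipc/shm-style caller obtains an unlinked, swap-backed file.
 * Passing VM_NORESERVE defers accounting until pages are actually
 * instantiated, as the @flags kernel-doc above describes.
 */
static struct file *example_swappable_object(loff_t size)
{
	return shmem_file_setup("example object", size, VM_NORESERVE);
}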
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4077 | |
Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 4078 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4079 | * shmem_zero_setup - setup a shared anonymous mapping |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4080 | * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff |
| 4081 | */ |
| 4082 | int shmem_zero_setup(struct vm_area_struct *vma) |
| 4083 | { |
| 4084 | struct file *file; |
| 4085 | loff_t size = vma->vm_end - vma->vm_start; |
| 4086 | |
Hugh Dickins | 66fc130 | 2015-06-14 09:48:09 -0700 | [diff] [blame] | 4087 | /* |
| 4088 | * Cloning a new file under mmap_sem leads to a lock ordering conflict |
| 4089 | * between XFS directory reading and selinux: since this file is only |
| 4090 | * accessible to the user through its mapping, use S_PRIVATE flag to |
| 4091 | * bypass file security, in the same way as shmem_kernel_file_setup(). |
| 4092 | */ |
| 4093 | file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4094 | if (IS_ERR(file)) |
| 4095 | return PTR_ERR(file); |
| 4096 | |
| 4097 | if (vma->vm_file) |
| 4098 | fput(vma->vm_file); |
| 4099 | vma->vm_file = file; |
| 4100 | vma->vm_ops = &shmem_vm_ops; |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 4101 | |
Kirill A. Shutemov | e496cf3 | 2016-07-26 15:26:35 -0700 | [diff] [blame] | 4102 | if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && |
Kirill A. Shutemov | f3f0e1d | 2016-07-26 15:26:32 -0700 | [diff] [blame] | 4103 | ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < |
| 4104 | (vma->vm_end & HPAGE_PMD_MASK)) { |
| 4105 | khugepaged_enter(vma, vma->vm_flags); |
| 4106 | } |
| 4107 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4108 | return 0; |
| 4109 | } |
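/*
 * Userspace sketch, not kernel code: a shared anonymous mapping is what
 * routes mmap() through shmem_zero_setup() above.
 */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 1 << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 42;	/* visible to any process sharing the mapping, e.g. after fork() */
	munmap(p, len);
	return 0;
}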
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 4110 | |
| 4111 | /** |
| 4112 | * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. |
| 4113 | * @mapping: the page's address_space |
| 4114 | * @index: the page index |
| 4115 | * @gfp: the page allocator flags to use if allocating |
| 4116 | * |
| 4117 | * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", |
| 4118 | * with any new page allocations done using the specified allocation flags. |
| 4119 | * But read_cache_page_gfp() uses the ->readpage() method, which does not |
| 4120 | * suit tmpfs, since it may have pages in swapcache, and needs to find those |
| 4121 | * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. |
| 4122 | * |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4123 | * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in |
| 4124 | * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 4125 | */ |
| 4126 | struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 4127 | pgoff_t index, gfp_t gfp) |
| 4128 | { |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4129 | #ifdef CONFIG_SHMEM |
| 4130 | struct inode *inode = mapping->host; |
Hugh Dickins | 9276aad | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4131 | struct page *page; |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4132 | int error; |
| 4133 | |
| 4134 | BUG_ON(mapping->a_ops != &shmem_aops); |
Andres Lagar-Cavilla | 9e18eb2 | 2016-05-19 17:12:47 -0700 | [diff] [blame] | 4135 | error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, |
| 4136 | gfp, NULL, NULL); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4137 | if (error) |
| 4138 | page = ERR_PTR(error); |
| 4139 | else |
| 4140 | unlock_page(page); |
| 4141 | return page; |
| 4142 | #else |
| 4143 | /* |
| 4144 | * The tiny !SHMEM case uses ramfs without swap |
| 4145 | */ |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 4146 | return read_cache_page_gfp(mapping, index, gfp); |
Hugh Dickins | 68da9f0 | 2011-07-25 17:12:34 -0700 | [diff] [blame] | 4147 | #endif |
Hugh Dickins | d9d90e5 | 2011-06-27 16:18:04 -0700 | [diff] [blame] | 4148 | } |
| 4149 | EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); |
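/*
 * Usage sketch (hypothetical function, in the spirit of the i915 usage
 * the kernel-doc above mentions): read a run of object pages, mixing
 * __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask so a failed
 * allocation falls back gracefully instead of OOMing the machine.
 */
static int example_pin_pages(struct address_space *mapping,
			     struct page **pages, pgoff_t nr)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	pgoff_t i;

	for (i = 0; i < nr; i++) {
		pages[i] = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(pages[i])) {
			int err = PTR_ERR(pages[i]);

			while (i--)
				put_page(pages[i]);	/* drop refs already taken */
			return err;
		}
	}
	return 0;
}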