/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
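
/*
 * Worked example of the limits above, assuming 4kB PAGE_CACHE_SIZE and a
 * 4-byte unsigned long (a typical 32-bit configuration), with
 * SHMEM_NR_DIRECT == 16 as in the shmem_swp_entry() illustration:
 *
 *	ENTRIES_PER_PAGE     = 4096 / 4           = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024 * 1024        = 1048576
 *	SHMSWP_MAX_INDEX     = 16 + 524288 * 1025 = 537395216 pages
 *	SHMSWP_MAX_BYTES     = 537395216 << 12    ~ 2.0 TiB
 *
 * which is the "just over 2TB on a 32-bit kernel" quoted above; an 8-byte
 * unsigned long halves ENTRIES_PER_PAGE and so divides the result by
 * roughly eight.
 */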

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

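/*
 * shmem_swaplist links the shmem inodes which have pages out on swap, so
 * that shmem_unuse() can find those pages again at swapoff time; additions
 * and removals are serialized by shmem_swaplist_mutex.
 */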
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		spin_lock(&inode->i_lock);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&inode->i_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
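
/*
 * A tmpfs mount with max_blocks or max_inodes of zero imposes no limit,
 * which is why the helpers above skip block and inode accounting entirely
 * in that case.
 */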

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	      	       +-> 48-51
 * 	      	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
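
/*
 * Worked example of the lookup above, using the artificial
 * ENTRIES_PER_PAGE == 4 and SHMEM_NR_DIRECT == 16 from the diagram:
 * index 37 becomes 21 after subtracting SHMEM_NR_DIRECT, giving offset = 1
 * and index = 5; 5 lies in the triple-indirect half, so we step to
 * i_indirect slot 2 (dir2 in the diagram) and then to its slot 3, the
 * page holding entries 36-39, where offset 1 is the entry for page 37.
 */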

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}
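
/*
 * Note: for indirect pages, shmem_swp_set() keeps a count of the swap
 * entries held in that page in page_private(), which the truncation and
 * swapoff paths below use to skip directory pages containing no swap
 * entries.
 */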

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test used_blocks against 1 less than max_blocks, since we
		 * have 1 data page (and perhaps indirect index pages) yet to
		 * allocate: a waste to allocate index if we cannot allocate
		 * data.
		 */
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks, (sbinfo->max_blocks - 1)) > 0)
				return ERR_PTR(-ENOSPC);
			percpu_counter_inc(&sbinfo->used_blocks);
			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&inode->i_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
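
/*
 * Note: shmem_swp_alloc() is entered with info->lock held; the
 * unlock/relock around shmem_dir_alloc() above means it may drop that
 * lock while allocating a directory page, which is why i_size and
 * next_index are rechecked (and a now-unneeded page given back) after
 * the lock is retaken.
 */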

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
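
/*
 * Both helpers above work in batches of LATENCY_LIMIT entries or pages,
 * giving cond_resched() a chance to run between batches, so that
 * truncating a huge sparse file does not monopolize the CPU.
 */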

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since
		 * truncate_pagecache or generic_delete_inode did it, before we
		 * lowered next_index.  Also, though shmem_getpage checks
		 * i_size before adding to cache, no recheck after: so fix the
		 * narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
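
/*
 * In outline, shmem_truncate_range() frees any swap entries held in
 * i_direct, walks the indirect tree freeing swap entries and collecting
 * whole directory pages once nothing in the truncated range can still
 * reach them (taking info->lock only where the hole-punch case requires
 * it, as the long comment above explains), re-runs
 * truncate_inode_pages_range() to catch pages swizzled in from swap
 * meanwhile, fixes up the accounting under info->lock, and finally frees
 * the directory pages collected on pages_to_free.
 */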

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	loff_t newsize = attr->ia_size;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
					&& newsize != inode->i_size) {
		struct page *page = NULL;

		if (newsize < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (newsize & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					newsize >> PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (newsize) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}

		/* XXX(truncate): truncate_setsize should be called last */
		truncate_setsize(inode, newsize);
		if (page)
			page_cache_release(page);
		shmem_truncate_range(inode, newsize, (loff_t)-1);
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

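/*
 * Returns 0 if the swap entry was not found in this inode (so that
 * shmem_unuse() keeps scanning the swaplist), 1 if it was found and dealt
 * with (dropping shmem_swaplist_mutex on the way), or a negative errno if
 * the page could not be charged or added to the page cache.
 */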
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}
|  | 1003 |  | 
|  | 1004 | /* | 
|  | 1005 | * shmem_unuse() searches for a swapped-out shmem page. | 
|  | 1006 | */ | 
|  | 1007 | int shmem_unuse(swp_entry_t entry, struct page *page) | 
|  | 1008 | { | 
|  | 1009 | struct list_head *p, *next; | 
|  | 1010 | struct shmem_inode_info *info; | 
|  | 1011 | int found = 0; | 
|  | 1012 |  | 
| Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1013 | mutex_lock(&shmem_swaplist_mutex); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | list_for_each_safe(p, next, &shmem_swaplist) { | 
|  | 1015 | info = list_entry(p, struct shmem_inode_info, swaplist); | 
| Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1016 | found = shmem_unuse_inode(info, entry, page); | 
| Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1017 | cond_resched(); | 
| Hugh Dickins | 2e0e26c | 2008-02-04 22:28:53 -0800 | [diff] [blame] | 1018 | if (found) | 
|  | 1019 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | } | 
| Hugh Dickins | cb5f7b9 | 2008-02-04 22:28:52 -0800 | [diff] [blame] | 1021 | mutex_unlock(&shmem_swaplist_mutex); | 
| Hugh Dickins | aaa4686 | 2009-12-14 17:58:47 -0800 | [diff] [blame] | 1022 | /* | 
|  | 1023 | * Can some race bring us here?  We've been holding page lock, | 
|  | 1024 | * so I think not; but would rather try again later than BUG() | 
|  | 1025 | */ | 
|  | 1026 | unlock_page(page); | 
|  | 1027 | page_cache_release(page); | 
|  | 1028 | out: | 
|  | 1029 | return (found < 0) ? found : 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | } | 
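For orientation, a minimal sketch (not part of shmem.c) of the calling convention a swapoff-style caller is expected to follow; the demo_ name is hypothetical, and the real caller is believed to be try_to_unuse() in mm/swapfile.c. The key point is that the page arrives locked with an extra reference, and shmem_unuse() unlocks and releases it on every path.

/* Hedged illustration only: a swapoff-style caller of shmem_unuse(). */
static int demo_unuse_shmem_entry(swp_entry_t entry, struct page *page)
{
	int retval;

	BUG_ON(!PageLocked(page));
	retval = shmem_unuse(entry, page);
	/* The page has already been unlocked and released; don't touch it. */
	return retval;	/* 0: handled or not found; negative: give up */
}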
|  | 1031 |  | 
|  | 1032 | /* | 
|  | 1033 | * Move the page from the page cache to the swap cache. | 
|  | 1034 | */ | 
|  | 1035 | static int shmem_writepage(struct page *page, struct writeback_control *wbc) | 
|  | 1036 | { | 
|  | 1037 | struct shmem_inode_info *info; | 
|  | 1038 | swp_entry_t *entry, swap; | 
|  | 1039 | struct address_space *mapping; | 
|  | 1040 | unsigned long index; | 
|  | 1041 | struct inode *inode; | 
|  | 1042 |  | 
|  | 1043 | BUG_ON(!PageLocked(page)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | mapping = page->mapping; | 
|  | 1045 | index = page->index; | 
|  | 1046 | inode = mapping->host; | 
|  | 1047 | info = SHMEM_I(inode); | 
|  | 1048 | if (info->flags & VM_LOCKED) | 
|  | 1049 | goto redirty; | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1050 | if (!total_swap_pages) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | goto redirty; | 
|  | 1052 |  | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1053 | /* | 
|  | 1054 | * shmem_backing_dev_info's capabilities prevent regular writeback or | 
|  | 1055 | * sync from ever calling shmem_writepage; but a stacking filesystem | 
|  | 1056 | * may use the ->writepage of its underlying filesystem, in which case | 
|  | 1057 | * tmpfs should write out to swap only in response to memory pressure, | 
| Jens Axboe | 5b0830c | 2009-09-23 19:37:09 +0200 | [diff] [blame] | 1058 | * and not for the writeback threads or sync.  However, in those cases, | 
|  | 1059 | * we do still want to check if there's a redundant swappage to be | 
|  | 1060 | * discarded. | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1061 | */ | 
|  | 1062 | if (wbc->for_reclaim) | 
|  | 1063 | swap = get_swap_page(); | 
|  | 1064 | else | 
|  | 1065 | swap.val = 0; | 
|  | 1066 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | spin_lock(&info->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1068 | if (index >= info->next_index) { | 
|  | 1069 | BUG_ON(!(info->flags & SHMEM_TRUNCATE)); | 
|  | 1070 | goto unlock; | 
|  | 1071 | } | 
|  | 1072 | entry = shmem_swp_entry(info, index, NULL); | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1073 | if (entry->val) { | 
|  | 1074 | /* | 
|  | 1075 | * The more uptodate page coming down from a stacked | 
|  | 1076 | * writepage should replace our old swappage. | 
|  | 1077 | */ | 
|  | 1078 | free_swap_and_cache(*entry); | 
|  | 1079 | shmem_swp_set(info, entry, 0); | 
|  | 1080 | } | 
|  | 1081 | shmem_recalc_inode(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1082 |  | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1083 | if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { | 
| Minchan Kim | 4c73b1b | 2011-03-22 16:32:40 -0700 | [diff] [blame] | 1084 | delete_from_page_cache(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 | shmem_swp_set(info, entry, swap.val); | 
|  | 1086 | shmem_swp_unmap(entry); | 
| Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1087 | if (list_empty(&info->swaplist)) | 
|  | 1088 | inode = igrab(inode); | 
|  | 1089 | else | 
|  | 1090 | inode = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | spin_unlock(&info->lock); | 
| Hugh Dickins | aaa4686 | 2009-12-14 17:58:47 -0800 | [diff] [blame] | 1092 | swap_shmem_alloc(swap); | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1093 | BUG_ON(page_mapped(page)); | 
| Hugh Dickins | 9fab561 | 2009-03-31 15:23:33 -0700 | [diff] [blame] | 1094 | swap_writepage(page, wbc); | 
| Hugh Dickins | 1b1b32f | 2008-02-04 22:28:55 -0800 | [diff] [blame] | 1095 | if (inode) { | 
|  | 1096 | mutex_lock(&shmem_swaplist_mutex); | 
|  | 1097 | /* move instead of add in case we're racing */ | 
|  | 1098 | list_move_tail(&info->swaplist, &shmem_swaplist); | 
|  | 1099 | mutex_unlock(&shmem_swaplist_mutex); | 
|  | 1100 | iput(inode); | 
|  | 1101 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | return 0; | 
|  | 1103 | } | 
|  | 1104 |  | 
|  | 1105 | shmem_swp_unmap(entry); | 
|  | 1106 | unlock: | 
|  | 1107 | spin_unlock(&info->lock); | 
| Daisuke Nishimura | 2ca4532 | 2009-09-21 17:02:52 -0700 | [diff] [blame] | 1108 | /* | 
|  | 1109 | * add_to_swap_cache() doesn't return -EEXIST, so we can safely | 
|  | 1110 | * clear SWAP_HAS_CACHE flag. | 
|  | 1111 | */ | 
| KAMEZAWA Hiroyuki | cb4b86b | 2009-06-16 15:32:52 -0700 | [diff] [blame] | 1112 | swapcache_free(swap, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1113 | redirty: | 
|  | 1114 | set_page_dirty(page); | 
| Hugh Dickins | d9fe526 | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1115 | if (wbc->for_reclaim) | 
|  | 1116 | return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */ | 
|  | 1117 | unlock_page(page); | 
|  | 1118 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | } | 
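A minimal sketch of how the writepage above is normally reached: only via reclaim, with wbc->for_reclaim set, which is what permits the swap-slot allocation; otherwise the page is simply redirtied. The demo_ wrapper is hypothetical, and the description of the reclaim caller (pageout() in mm/vmscan.c) is an assumption from the surrounding kernel.

/* Hedged illustration only: how reclaim is expected to invoke ->writepage. */
static int demo_reclaim_writepage(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_reclaim	= 1,	/* lets shmem_writepage try get_swap_page() */
	};
	int res = mapping->a_ops->writepage(page, &wbc);

	if (res == AOP_WRITEPAGE_ACTIVATE) {
		/* No swap slot: the page was redirtied and comes back still locked. */
		unlock_page(page);
		return 0;
	}
	return res;
}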
|  | 1120 |  | 
|  | 1121 | #ifdef CONFIG_NUMA | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1122 | #ifdef CONFIG_TMPFS | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1123 | static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1124 | { | 
| Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1125 | char buffer[64]; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1126 |  | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1127 | if (!mpol || mpol->mode == MPOL_DEFAULT) | 
| Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1128 | return;		/* show nothing */ | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1129 |  | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1130 | mpol_to_str(buffer, sizeof(buffer), mpol, 1); | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1131 |  | 
| Lee Schermerhorn | 095f1fc | 2008-04-28 02:13:23 -0700 | [diff] [blame] | 1132 | seq_printf(seq, ",mpol=%s", buffer); | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1133 | } | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1134 |  | 
|  | 1135 | static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | 
|  | 1136 | { | 
|  | 1137 | struct mempolicy *mpol = NULL; | 
|  | 1138 | if (sbinfo->mpol) { | 
|  | 1139 | spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */ | 
|  | 1140 | mpol = sbinfo->mpol; | 
|  | 1141 | mpol_get(mpol); | 
|  | 1142 | spin_unlock(&sbinfo->stat_lock); | 
|  | 1143 | } | 
|  | 1144 | return mpol; | 
|  | 1145 | } | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1146 | #endif /* CONFIG_TMPFS */ | 
|  | 1147 |  | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1148 | static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 
|  | 1149 | struct shmem_inode_info *info, unsigned long idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | { | 
| Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 1151 | struct mempolicy mpol, *spol; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | struct vm_area_struct pvma; | 
| Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 1153 | struct page *page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 |  | 
| Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 1155 | spol = mpol_cond_copy(&mpol, | 
|  | 1156 | mpol_shared_policy_lookup(&info->policy, idx)); | 
|  | 1157 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1158 | /* Create a pseudo vma that just contains the policy */ | 
| Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 1159 | pvma.vm_start = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1160 | pvma.vm_pgoff = idx; | 
| Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 1161 | pvma.vm_ops = NULL; | 
| Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 1162 | pvma.vm_policy = spol; | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1163 | page = swapin_readahead(entry, gfp, &pvma, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1164 | return page; | 
|  | 1165 | } | 
|  | 1166 |  | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1167 | static struct page *shmem_alloc_page(gfp_t gfp, | 
|  | 1168 | struct shmem_inode_info *info, unsigned long idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | { | 
|  | 1170 | struct vm_area_struct pvma; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 |  | 
| Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 1172 | /* Create a pseudo vma that just contains the policy */ | 
|  | 1173 | pvma.vm_start = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | pvma.vm_pgoff = idx; | 
| Hugh Dickins | c4cc6d0 | 2008-02-04 22:28:40 -0800 | [diff] [blame] | 1175 | pvma.vm_ops = NULL; | 
|  | 1176 | pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); | 
| Lee Schermerhorn | 52cd3b0 | 2008-04-28 02:13:16 -0700 | [diff] [blame] | 1177 |  | 
|  | 1178 | /* | 
|  | 1179 | * alloc_page_vma() will drop the shared policy reference | 
|  | 1180 | */ | 
|  | 1181 | return alloc_page_vma(gfp, &pvma, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | } | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1183 | #else /* !CONFIG_NUMA */ | 
|  | 1184 | #ifdef CONFIG_TMPFS | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1185 | static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p) | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1186 | { | 
|  | 1187 | } | 
|  | 1188 | #endif /* CONFIG_TMPFS */ | 
|  | 1189 |  | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1190 | static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp, | 
|  | 1191 | struct shmem_inode_info *info, unsigned long idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | { | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1193 | return swapin_readahead(entry, gfp, NULL, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | } | 
|  | 1195 |  | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1196 | static inline struct page *shmem_alloc_page(gfp_t gfp, | 
|  | 1197 | struct shmem_inode_info *info, unsigned long idx) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 | { | 
| Hugh Dickins | e84e2e1 | 2007-11-28 18:55:10 +0000 | [diff] [blame] | 1199 | return alloc_page(gfp); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | } | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 1201 | #endif /* CONFIG_NUMA */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 |  | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1203 | #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) | 
|  | 1204 | static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) | 
|  | 1205 | { | 
|  | 1206 | return NULL; | 
|  | 1207 | } | 
|  | 1208 | #endif | 
|  | 1209 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | /* | 
|  | 1211 | * shmem_getpage - either get the page from swap or allocate a new one | 
|  | 1212 | * | 
|  | 1213 | * If we allocate a new one we do not mark it dirty: that's up to the | 
|  | 1214 | * vm. If we swap it in we mark it dirty, and also free the swap | 
|  | 1215 | * entry, since a page cannot live in both the swap and page cache. | 
|  | 1216 | */ | 
|  | 1217 | static int shmem_getpage(struct inode *inode, unsigned long idx, | 
|  | 1218 | struct page **pagep, enum sgp_type sgp, int *type) | 
|  | 1219 | { | 
|  | 1220 | struct address_space *mapping = inode->i_mapping; | 
|  | 1221 | struct shmem_inode_info *info = SHMEM_I(inode); | 
|  | 1222 | struct shmem_sb_info *sbinfo; | 
|  | 1223 | struct page *filepage = *pagep; | 
|  | 1224 | struct page *swappage; | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1225 | struct page *prealloc_page = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | swp_entry_t *entry; | 
|  | 1227 | swp_entry_t swap; | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1228 | gfp_t gfp; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | int error; | 
|  | 1230 |  | 
|  | 1231 | if (idx >= SHMEM_MAX_INDEX) | 
|  | 1232 | return -EFBIG; | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1233 |  | 
|  | 1234 | if (type) | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1235 | *type = 0; | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 1236 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | /* | 
|  | 1238 | * Normally, filepage is NULL on entry, and either found | 
|  | 1239 | * uptodate immediately, or allocated and zeroed, or read | 
|  | 1240 | * in under swappage, which is then assigned to filepage. | 
| Hugh Dickins | 5402b97 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1241 | * But shmem_readpage (required for splice) passes in a locked | 
| Hugh Dickins | ae97641 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 1242 | * filepage, which may be found not uptodate by other callers | 
|  | 1243 | * too, and may need to be copied from the swappage read in. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 | */ | 
|  | 1245 | repeat: | 
|  | 1246 | if (!filepage) | 
|  | 1247 | filepage = find_lock_page(mapping, idx); | 
|  | 1248 | if (filepage && PageUptodate(filepage)) | 
|  | 1249 | goto done; | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1250 | gfp = mapping_gfp_mask(mapping); | 
| Hugh Dickins | b409f9f | 2008-02-04 22:28:54 -0800 | [diff] [blame] | 1251 | if (!filepage) { | 
|  | 1252 | /* | 
|  | 1253 | * Try to preload while we can wait, to not make a habit of | 
|  | 1254 | * draining atomic reserves; but don't latch on to this cpu. | 
|  | 1255 | */ | 
|  | 1256 | error = radix_tree_preload(gfp & ~__GFP_HIGHMEM); | 
|  | 1257 | if (error) | 
|  | 1258 | goto failed; | 
|  | 1259 | radix_tree_preload_end(); | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1260 | if (sgp != SGP_READ && !prealloc_page) { | 
|  | 1261 | /* We don't care if this fails */ | 
|  | 1262 | prealloc_page = shmem_alloc_page(gfp, info, idx); | 
|  | 1263 | if (prealloc_page) { | 
|  | 1264 | if (mem_cgroup_cache_charge(prealloc_page, | 
|  | 1265 | current->mm, GFP_KERNEL)) { | 
|  | 1266 | page_cache_release(prealloc_page); | 
|  | 1267 | prealloc_page = NULL; | 
|  | 1268 | } | 
|  | 1269 | } | 
|  | 1270 | } | 
| Hugh Dickins | b409f9f | 2008-02-04 22:28:54 -0800 | [diff] [blame] | 1271 | } | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1272 | error = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1273 |  | 
|  | 1274 | spin_lock(&info->lock); | 
|  | 1275 | shmem_recalc_inode(inode); | 
|  | 1276 | entry = shmem_swp_alloc(info, idx, sgp); | 
|  | 1277 | if (IS_ERR(entry)) { | 
|  | 1278 | spin_unlock(&info->lock); | 
|  | 1279 | error = PTR_ERR(entry); | 
|  | 1280 | goto failed; | 
|  | 1281 | } | 
|  | 1282 | swap = *entry; | 
|  | 1283 |  | 
|  | 1284 | if (swap.val) { | 
|  | 1285 | /* Look it up and read it in.. */ | 
|  | 1286 | swappage = lookup_swap_cache(swap); | 
|  | 1287 | if (!swappage) { | 
|  | 1288 | shmem_swp_unmap(entry); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | /* here we actually do the io */ | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1290 | if (type && !(*type & VM_FAULT_MAJOR)) { | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1291 | __count_vm_event(PGMAJFAULT); | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1292 | *type |= VM_FAULT_MAJOR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | } | 
| Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 1294 | spin_unlock(&info->lock); | 
| Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 1295 | swappage = shmem_swapin(swap, gfp, info, idx); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | if (!swappage) { | 
|  | 1297 | spin_lock(&info->lock); | 
|  | 1298 | entry = shmem_swp_alloc(info, idx, sgp); | 
|  | 1299 | if (IS_ERR(entry)) | 
|  | 1300 | error = PTR_ERR(entry); | 
|  | 1301 | else { | 
|  | 1302 | if (entry->val == swap.val) | 
|  | 1303 | error = -ENOMEM; | 
|  | 1304 | shmem_swp_unmap(entry); | 
|  | 1305 | } | 
|  | 1306 | spin_unlock(&info->lock); | 
|  | 1307 | if (error) | 
|  | 1308 | goto failed; | 
|  | 1309 | goto repeat; | 
|  | 1310 | } | 
|  | 1311 | wait_on_page_locked(swappage); | 
|  | 1312 | page_cache_release(swappage); | 
|  | 1313 | goto repeat; | 
|  | 1314 | } | 
|  | 1315 |  | 
|  | 1316 | /* We have to do this with page locked to prevent races */ | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 1317 | if (!trylock_page(swappage)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | shmem_swp_unmap(entry); | 
|  | 1319 | spin_unlock(&info->lock); | 
|  | 1320 | wait_on_page_locked(swappage); | 
|  | 1321 | page_cache_release(swappage); | 
|  | 1322 | goto repeat; | 
|  | 1323 | } | 
|  | 1324 | if (PageWriteback(swappage)) { | 
|  | 1325 | shmem_swp_unmap(entry); | 
|  | 1326 | spin_unlock(&info->lock); | 
|  | 1327 | wait_on_page_writeback(swappage); | 
|  | 1328 | unlock_page(swappage); | 
|  | 1329 | page_cache_release(swappage); | 
|  | 1330 | goto repeat; | 
|  | 1331 | } | 
|  | 1332 | if (!PageUptodate(swappage)) { | 
|  | 1333 | shmem_swp_unmap(entry); | 
|  | 1334 | spin_unlock(&info->lock); | 
|  | 1335 | unlock_page(swappage); | 
|  | 1336 | page_cache_release(swappage); | 
|  | 1337 | error = -EIO; | 
|  | 1338 | goto failed; | 
|  | 1339 | } | 
|  | 1340 |  | 
|  | 1341 | if (filepage) { | 
|  | 1342 | shmem_swp_set(info, entry, 0); | 
|  | 1343 | shmem_swp_unmap(entry); | 
|  | 1344 | delete_from_swap_cache(swappage); | 
|  | 1345 | spin_unlock(&info->lock); | 
|  | 1346 | copy_highpage(filepage, swappage); | 
|  | 1347 | unlock_page(swappage); | 
|  | 1348 | page_cache_release(swappage); | 
|  | 1349 | flush_dcache_page(filepage); | 
|  | 1350 | SetPageUptodate(filepage); | 
|  | 1351 | set_page_dirty(filepage); | 
|  | 1352 | swap_free(swap); | 
| Nick Piggin | e286781 | 2008-07-25 19:45:30 -0700 | [diff] [blame] | 1353 | } else if (!(error = add_to_page_cache_locked(swappage, mapping, | 
|  | 1354 | idx, GFP_NOWAIT))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1355 | info->flags |= SHMEM_PAGEIN; | 
|  | 1356 | shmem_swp_set(info, entry, 0); | 
|  | 1357 | shmem_swp_unmap(entry); | 
| Hugh Dickins | 73b1262 | 2008-02-04 22:28:50 -0800 | [diff] [blame] | 1358 | delete_from_swap_cache(swappage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | spin_unlock(&info->lock); | 
|  | 1360 | filepage = swappage; | 
| Hugh Dickins | 73b1262 | 2008-02-04 22:28:50 -0800 | [diff] [blame] | 1361 | set_page_dirty(filepage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | swap_free(swap); | 
|  | 1363 | } else { | 
|  | 1364 | shmem_swp_unmap(entry); | 
|  | 1365 | spin_unlock(&info->lock); | 
| Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 1366 | if (error == -ENOMEM) { | 
| Daisuke Nishimura | ae3abae | 2009-04-30 15:08:19 -0700 | [diff] [blame] | 1367 | /* | 
|  | 1368 | * reclaim from proper memory cgroup and | 
|  | 1369 | * call memcg's OOM if needed. | 
|  | 1370 | */ | 
|  | 1371 | error = mem_cgroup_shmem_charge_fallback( | 
|  | 1372 | swappage, | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 1373 | current->mm, | 
| KAMEZAWA Hiroyuki | c9b0ed5 | 2008-07-25 01:47:15 -0700 | [diff] [blame] | 1374 | gfp); | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 1375 | if (error) { | 
|  | 1376 | unlock_page(swappage); | 
|  | 1377 | page_cache_release(swappage); | 
| Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 1378 | goto failed; | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 1379 | } | 
| Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 1380 | } | 
| KAMEZAWA Hiroyuki | b5a8431 | 2009-01-07 18:08:35 -0800 | [diff] [blame] | 1381 | unlock_page(swappage); | 
|  | 1382 | page_cache_release(swappage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 | goto repeat; | 
|  | 1384 | } | 
|  | 1385 | } else if (sgp == SGP_READ && !filepage) { | 
|  | 1386 | shmem_swp_unmap(entry); | 
|  | 1387 | filepage = find_get_page(mapping, idx); | 
|  | 1388 | if (filepage && | 
| Nick Piggin | 529ae9a | 2008-08-02 12:01:03 +0200 | [diff] [blame] | 1389 | (!PageUptodate(filepage) || !trylock_page(filepage))) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | spin_unlock(&info->lock); | 
|  | 1391 | wait_on_page_locked(filepage); | 
|  | 1392 | page_cache_release(filepage); | 
|  | 1393 | filepage = NULL; | 
|  | 1394 | goto repeat; | 
|  | 1395 | } | 
|  | 1396 | spin_unlock(&info->lock); | 
|  | 1397 | } else { | 
|  | 1398 | shmem_swp_unmap(entry); | 
|  | 1399 | sbinfo = SHMEM_SB(inode->i_sb); | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1400 | if (sbinfo->max_blocks) { | 
| Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 1401 | if ((percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks) > 0) || | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1402 | shmem_acct_block(info->flags)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | spin_unlock(&info->lock); | 
|  | 1404 | error = -ENOSPC; | 
|  | 1405 | goto failed; | 
|  | 1406 | } | 
| Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 1407 | percpu_counter_inc(&sbinfo->used_blocks); | 
|  | 1408 | spin_lock(&inode->i_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | inode->i_blocks += BLOCKS_PER_PAGE; | 
| Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 1410 | spin_unlock(&inode->i_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | } else if (shmem_acct_block(info->flags)) { | 
|  | 1412 | spin_unlock(&info->lock); | 
|  | 1413 | error = -ENOSPC; | 
|  | 1414 | goto failed; | 
|  | 1415 | } | 
|  | 1416 |  | 
|  | 1417 | if (!filepage) { | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 1418 | int ret; | 
|  | 1419 |  | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1420 | if (!prealloc_page) { | 
|  | 1421 | spin_unlock(&info->lock); | 
|  | 1422 | filepage = shmem_alloc_page(gfp, info, idx); | 
|  | 1423 | if (!filepage) { | 
|  | 1424 | shmem_unacct_blocks(info->flags, 1); | 
|  | 1425 | shmem_free_blocks(inode, 1); | 
|  | 1426 | error = -ENOMEM; | 
|  | 1427 | goto failed; | 
|  | 1428 | } | 
|  | 1429 | SetPageSwapBacked(filepage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 |  | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1431 | /* | 
|  | 1432 | * Precharge the page while we can wait, and compensate | 
|  | 1433 | * for it afterwards. | 
|  | 1434 | */ | 
|  | 1435 | error = mem_cgroup_cache_charge(filepage, | 
|  | 1436 | current->mm, GFP_KERNEL); | 
|  | 1437 | if (error) { | 
|  | 1438 | page_cache_release(filepage); | 
|  | 1439 | shmem_unacct_blocks(info->flags, 1); | 
|  | 1440 | shmem_free_blocks(inode, 1); | 
|  | 1441 | filepage = NULL; | 
|  | 1442 | goto failed; | 
|  | 1443 | } | 
|  | 1444 |  | 
|  | 1445 | spin_lock(&info->lock); | 
|  | 1446 | } else { | 
|  | 1447 | filepage = prealloc_page; | 
|  | 1448 | prealloc_page = NULL; | 
|  | 1449 | SetPageSwapBacked(filepage); | 
| Hugh Dickins | 8236955 | 2008-02-07 00:14:22 -0800 | [diff] [blame] | 1450 | } | 
|  | 1451 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | entry = shmem_swp_alloc(info, idx, sgp); | 
|  | 1453 | if (IS_ERR(entry)) | 
|  | 1454 | error = PTR_ERR(entry); | 
|  | 1455 | else { | 
|  | 1456 | swap = *entry; | 
|  | 1457 | shmem_swp_unmap(entry); | 
|  | 1458 | } | 
| KAMEZAWA Hiroyuki | 69029cd | 2008-07-25 01:47:14 -0700 | [diff] [blame] | 1459 | ret = error || swap.val; | 
|  | 1460 | if (ret) | 
|  | 1461 | mem_cgroup_uncharge_cache_page(filepage); | 
|  | 1462 | else | 
|  | 1463 | ret = add_to_page_cache_lru(filepage, mapping, | 
|  | 1464 | idx, GFP_NOWAIT); | 
|  | 1465 | /* | 
|  | 1466 | * On add_to_page_cache_lru() failure, the uncharge will | 
|  | 1467 | * be done automatically. | 
|  | 1468 | */ | 
|  | 1469 | if (ret) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1470 | spin_unlock(&info->lock); | 
|  | 1471 | page_cache_release(filepage); | 
|  | 1472 | shmem_unacct_blocks(info->flags, 1); | 
|  | 1473 | shmem_free_blocks(inode, 1); | 
|  | 1474 | filepage = NULL; | 
|  | 1475 | if (error) | 
|  | 1476 | goto failed; | 
|  | 1477 | goto repeat; | 
|  | 1478 | } | 
|  | 1479 | info->flags |= SHMEM_PAGEIN; | 
|  | 1480 | } | 
|  | 1481 |  | 
|  | 1482 | info->alloced++; | 
|  | 1483 | spin_unlock(&info->lock); | 
| Hugh Dickins | e84e2e1 | 2007-11-28 18:55:10 +0000 | [diff] [blame] | 1484 | clear_highpage(filepage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 | flush_dcache_page(filepage); | 
|  | 1486 | SetPageUptodate(filepage); | 
| Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1487 | if (sgp == SGP_DIRTY) | 
|  | 1488 | set_page_dirty(filepage); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | } | 
|  | 1490 | done: | 
| Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1491 | *pagep = filepage; | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1492 | error = 0; | 
|  | 1493 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 |  | 
|  | 1495 | failed: | 
|  | 1496 | if (*pagep != filepage) { | 
|  | 1497 | unlock_page(filepage); | 
|  | 1498 | page_cache_release(filepage); | 
|  | 1499 | } | 
| Shaohua Li | ff36b80 | 2010-08-09 17:19:06 -0700 | [diff] [blame] | 1500 | out: | 
|  | 1501 | if (prealloc_page) { | 
|  | 1502 | mem_cgroup_uncharge_cache_page(prealloc_page); | 
|  | 1503 | page_cache_release(prealloc_page); | 
|  | 1504 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | return error; | 
|  | 1506 | } | 
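As a usage note, a minimal sketch (hypothetical demo_ helper, not in the tree) of the contract described in the comment above shmem_getpage(): the normal caller passes *pagep == NULL, and on success gets back a locked, uptodate page with a reference held, exactly as shmem_readpage() and shmem_fault() below rely on.

static int demo_read_one_page(struct inode *inode, unsigned long idx)
{
	struct page *page = NULL;	/* NULL on entry in the common case */
	int error = shmem_getpage(inode, idx, &page, SGP_CACHE, NULL);

	if (error)
		return error;
	/* ... use the uptodate page contents here ... */
	unlock_page(page);
	page_cache_release(page);
	return 0;
}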
|  | 1507 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1508 | static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1509 | { | 
| Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1510 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | int error; | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1512 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 |  | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1514 | if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 
|  | 1515 | return VM_FAULT_SIGBUS; | 
| Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 1516 |  | 
| Hugh Dickins | 27d54b3 | 2008-02-04 22:28:43 -0800 | [diff] [blame] | 1517 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1518 | if (error) | 
|  | 1519 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1520 |  | 
| Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1521 | return ret | VM_FAULT_LOCKED; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1522 | } | 
|  | 1523 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1524 | #ifdef CONFIG_NUMA | 
| Adrian Bunk | d8dc74f | 2007-10-16 01:26:26 -0700 | [diff] [blame] | 1525 | static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1526 | { | 
| Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1527 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new); | 
|  | 1529 | } | 
|  | 1530 |  | 
| Adrian Bunk | d8dc74f | 2007-10-16 01:26:26 -0700 | [diff] [blame] | 1531 | static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, | 
|  | 1532 | unsigned long addr) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | { | 
| Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1534 | struct inode *i = vma->vm_file->f_path.dentry->d_inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1535 | unsigned long idx; | 
|  | 1536 |  | 
|  | 1537 | idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; | 
|  | 1538 | return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx); | 
|  | 1539 | } | 
|  | 1540 | #endif | 
|  | 1541 |  | 
|  | 1542 | int shmem_lock(struct file *file, int lock, struct user_struct *user) | 
|  | 1543 | { | 
| Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1544 | struct inode *inode = file->f_path.dentry->d_inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | struct shmem_inode_info *info = SHMEM_I(inode); | 
|  | 1546 | int retval = -ENOMEM; | 
|  | 1547 |  | 
|  | 1548 | spin_lock(&info->lock); | 
|  | 1549 | if (lock && !(info->flags & VM_LOCKED)) { | 
|  | 1550 | if (!user_shm_lock(inode->i_size, user)) | 
|  | 1551 | goto out_nomem; | 
|  | 1552 | info->flags |= VM_LOCKED; | 
| Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1553 | mapping_set_unevictable(file->f_mapping); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | } | 
|  | 1555 | if (!lock && (info->flags & VM_LOCKED) && user) { | 
|  | 1556 | user_shm_unlock(inode->i_size, user); | 
|  | 1557 | info->flags &= ~VM_LOCKED; | 
| Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1558 | mapping_clear_unevictable(file->f_mapping); | 
|  | 1559 | scan_mapping_unevictable_pages(file->f_mapping); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | } | 
|  | 1561 | retval = 0; | 
| Lee Schermerhorn | 89e004ea | 2008-10-18 20:26:43 -0700 | [diff] [blame] | 1562 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | out_nomem: | 
|  | 1564 | spin_unlock(&info->lock); | 
|  | 1565 | return retval; | 
|  | 1566 | } | 
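A minimal sketch of the expected caller (hypothetical demo_ wrapper; the real user is believed to be the SysV shmctl(SHM_LOCK/SHM_UNLOCK) path in ipc/shm.c): the user_struct is what user_shm_lock()/user_shm_unlock() above charge the locked size against.

static int demo_shm_set_locked(struct file *shm_file, struct user_struct *user, int lock)
{
	/* Returns 0, or -ENOMEM when the RLIMIT_MEMLOCK charge fails. */
	return shmem_lock(shm_file, lock, user);
}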
|  | 1567 |  | 
| Adrian Bunk | 9b83a6a | 2007-02-28 20:11:03 -0800 | [diff] [blame] | 1568 | static int shmem_mmap(struct file *file, struct vm_area_struct *vma) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | { | 
|  | 1570 | file_accessed(file); | 
|  | 1571 | vma->vm_ops = &shmem_vm_ops; | 
| Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1572 | vma->vm_flags |= VM_CAN_NONLINEAR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1573 | return 0; | 
|  | 1574 | } | 
|  | 1575 |  | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1576 | static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, | 
|  | 1577 | int mode, dev_t dev, unsigned long flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | { | 
|  | 1579 | struct inode *inode; | 
|  | 1580 | struct shmem_inode_info *info; | 
|  | 1581 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 
|  | 1582 |  | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1583 | if (shmem_reserve_inode(sb)) | 
|  | 1584 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 |  | 
|  | 1586 | inode = new_inode(sb); | 
|  | 1587 | if (inode) { | 
| Christoph Hellwig | 85fe402 | 2010-10-23 11:19:54 -0400 | [diff] [blame] | 1588 | inode->i_ino = get_next_ino(); | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1589 | inode_init_owner(inode, dir, mode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | inode->i_blocks = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; | 
|  | 1592 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 1593 | inode->i_generation = get_seconds(); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1594 | info = SHMEM_I(inode); | 
|  | 1595 | memset(info, 0, (char *)inode - (char *)info); | 
|  | 1596 | spin_lock_init(&info->lock); | 
| Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 1597 | info->flags = flags & VM_NORESERVE; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | INIT_LIST_HEAD(&info->swaplist); | 
| Al Viro | 72c0490 | 2009-06-24 16:58:48 -0400 | [diff] [blame] | 1599 | cache_no_acl(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1600 |  | 
|  | 1601 | switch (mode & S_IFMT) { | 
|  | 1602 | default: | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1603 | inode->i_op = &shmem_special_inode_operations; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1604 | init_special_inode(inode, mode, dev); | 
|  | 1605 | break; | 
|  | 1606 | case S_IFREG: | 
| Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 1607 | inode->i_mapping->a_ops = &shmem_aops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1608 | inode->i_op = &shmem_inode_operations; | 
|  | 1609 | inode->i_fop = &shmem_file_operations; | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1610 | mpol_shared_policy_init(&info->policy, | 
|  | 1611 | shmem_get_sbmpol(sbinfo)); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | break; | 
|  | 1613 | case S_IFDIR: | 
| Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1614 | inc_nlink(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1615 | /* Some things misbehave if size == 0 on a directory */ | 
|  | 1616 | inode->i_size = 2 * BOGO_DIRENT_SIZE; | 
|  | 1617 | inode->i_op = &shmem_dir_inode_operations; | 
|  | 1618 | inode->i_fop = &simple_dir_operations; | 
|  | 1619 | break; | 
|  | 1620 | case S_IFLNK: | 
|  | 1621 | /* | 
|  | 1622 | * Must not load anything in the rbtree, | 
|  | 1623 | * mpol_free_shared_policy will not be called. | 
|  | 1624 | */ | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 1625 | mpol_shared_policy_init(&info->policy, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | break; | 
|  | 1627 | } | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1628 | } else | 
|  | 1629 | shmem_free_inode(sb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | return inode; | 
|  | 1631 | } | 
|  | 1632 |  | 
|  | 1633 | #ifdef CONFIG_TMPFS | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 1634 | static const struct inode_operations shmem_symlink_inode_operations; | 
|  | 1635 | static const struct inode_operations shmem_symlink_inline_operations; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1636 |  | 
|  | 1637 | /* | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1638 | * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin; | 
| Hugh Dickins | ae97641 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 1639 | * but providing them allows a tmpfs file to be used for splice, sendfile, and | 
|  | 1640 | * below the loop driver, in the generic fashion that many filesystems support. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | */ | 
| Hugh Dickins | ae97641 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 1642 | static int shmem_readpage(struct file *file, struct page *page) | 
|  | 1643 | { | 
|  | 1644 | struct inode *inode = page->mapping->host; | 
|  | 1645 | int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL); | 
|  | 1646 | unlock_page(page); | 
|  | 1647 | return error; | 
|  | 1648 | } | 
|  | 1649 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1650 | static int | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1651 | shmem_write_begin(struct file *file, struct address_space *mapping, | 
|  | 1652 | loff_t pos, unsigned len, unsigned flags, | 
|  | 1653 | struct page **pagep, void **fsdata) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | { | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1655 | struct inode *inode = mapping->host; | 
|  | 1656 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 
|  | 1657 | *pagep = NULL; | 
|  | 1658 | return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); | 
|  | 1659 | } | 
|  | 1660 |  | 
|  | 1661 | static int | 
|  | 1662 | shmem_write_end(struct file *file, struct address_space *mapping, | 
|  | 1663 | loff_t pos, unsigned len, unsigned copied, | 
|  | 1664 | struct page *page, void *fsdata) | 
|  | 1665 | { | 
|  | 1666 | struct inode *inode = mapping->host; | 
|  | 1667 |  | 
| Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1668 | if (pos + copied > inode->i_size) | 
|  | 1669 | i_size_write(inode, pos + copied); | 
|  | 1670 |  | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1671 | set_page_dirty(page); | 
| Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 1672 | unlock_page(page); | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1673 | page_cache_release(page); | 
|  | 1674 |  | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 1675 | return copied; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1676 | } | 
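A minimal sketch of the sequence a generic writer (believed to be generic_perform_write()) drives through the two hooks above; the demo_ name and the single-page assumption are illustrative only. write_begin hands back a locked page, the caller copies into it, and write_end dirties, unlocks and releases it, returning the byte count accepted.

static int demo_write_within_one_page(struct file *file, loff_t pos,
				      const char *buf, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	unsigned offset = pos & ~PAGE_CACHE_MASK;	/* assumes offset + len <= page size */
	struct page *page;
	void *fsdata;
	char *kaddr;
	int error;

	error = mapping->a_ops->write_begin(file, mapping, pos, len, 0,
					    &page, &fsdata);
	if (error)
		return error;
	kaddr = kmap(page);
	memcpy(kaddr + offset, buf, len);
	kunmap(page);
	flush_dcache_page(page);
	/* Dirties, unlocks and releases the page; returns bytes accepted. */
	return mapping->a_ops->write_end(file, mapping, pos, len, len,
					 page, fsdata);
}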
|  | 1677 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) | 
|  | 1679 | { | 
| Josef "Jeff" Sipek | d3ac7f8 | 2006-12-08 02:36:44 -0800 | [diff] [blame] | 1680 | struct inode *inode = filp->f_path.dentry->d_inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | struct address_space *mapping = inode->i_mapping; | 
|  | 1682 | unsigned long index, offset; | 
| Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1683 | enum sgp_type sgp = SGP_READ; | 
|  | 1684 |  | 
|  | 1685 | /* | 
|  | 1686 | * Might this read be for a stacking filesystem?  Then when reading | 
|  | 1687 | * holes of a sparse file, we actually need to allocate those pages, | 
|  | 1688 | * and even mark them dirty, so it cannot exceed the max_blocks limit. | 
|  | 1689 | */ | 
|  | 1690 | if (segment_eq(get_fs(), KERNEL_DS)) | 
|  | 1691 | sgp = SGP_DIRTY; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 |  | 
|  | 1693 | index = *ppos >> PAGE_CACHE_SHIFT; | 
|  | 1694 | offset = *ppos & ~PAGE_CACHE_MASK; | 
|  | 1695 |  | 
|  | 1696 | for (;;) { | 
|  | 1697 | struct page *page = NULL; | 
|  | 1698 | unsigned long end_index, nr, ret; | 
|  | 1699 | loff_t i_size = i_size_read(inode); | 
|  | 1700 |  | 
|  | 1701 | end_index = i_size >> PAGE_CACHE_SHIFT; | 
|  | 1702 | if (index > end_index) | 
|  | 1703 | break; | 
|  | 1704 | if (index == end_index) { | 
|  | 1705 | nr = i_size & ~PAGE_CACHE_MASK; | 
|  | 1706 | if (nr <= offset) | 
|  | 1707 | break; | 
|  | 1708 | } | 
|  | 1709 |  | 
| Hugh Dickins | a0ee5ec | 2008-02-04 22:28:51 -0800 | [diff] [blame] | 1710 | desc->error = shmem_getpage(inode, index, &page, sgp, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1711 | if (desc->error) { | 
|  | 1712 | if (desc->error == -EINVAL) | 
|  | 1713 | desc->error = 0; | 
|  | 1714 | break; | 
|  | 1715 | } | 
| Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 1716 | if (page) | 
|  | 1717 | unlock_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1718 |  | 
|  | 1719 | /* | 
|  | 1720 | * We must evaluate after, since reads (unlike writes) | 
| Jes Sorensen | 1b1dcc1 | 2006-01-09 15:59:24 -0800 | [diff] [blame] | 1721 | * are called without i_mutex protection against truncate | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1722 | */ | 
|  | 1723 | nr = PAGE_CACHE_SIZE; | 
|  | 1724 | i_size = i_size_read(inode); | 
|  | 1725 | end_index = i_size >> PAGE_CACHE_SHIFT; | 
|  | 1726 | if (index == end_index) { | 
|  | 1727 | nr = i_size & ~PAGE_CACHE_MASK; | 
|  | 1728 | if (nr <= offset) { | 
|  | 1729 | if (page) | 
|  | 1730 | page_cache_release(page); | 
|  | 1731 | break; | 
|  | 1732 | } | 
|  | 1733 | } | 
|  | 1734 | nr -= offset; | 
|  | 1735 |  | 
|  | 1736 | if (page) { | 
|  | 1737 | /* | 
|  | 1738 | * If users can be writing to this page using arbitrary | 
|  | 1739 | * virtual addresses, take care about potential aliasing | 
|  | 1740 | * before reading the page on the kernel side. | 
|  | 1741 | */ | 
|  | 1742 | if (mapping_writably_mapped(mapping)) | 
|  | 1743 | flush_dcache_page(page); | 
|  | 1744 | /* | 
|  | 1745 | * Mark the page accessed if we read the beginning. | 
|  | 1746 | */ | 
|  | 1747 | if (!offset) | 
|  | 1748 | mark_page_accessed(page); | 
| Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1749 | } else { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | page = ZERO_PAGE(0); | 
| Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1751 | page_cache_get(page); | 
|  | 1752 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 |  | 
|  | 1754 | /* | 
|  | 1755 | * Ok, we have the page, and it's up-to-date, so | 
|  | 1756 | * now we can copy it to user space... | 
|  | 1757 | * | 
|  | 1758 | * The actor routine returns how many bytes were actually used. | 
|  | 1759 | * NOTE! This may not be the same as how much of a user buffer | 
|  | 1760 | * we filled up (we may be padding etc), so we can only update | 
|  | 1761 | * "pos" here (the actor routine has to update the user buffer | 
|  | 1762 | * pointers and the remaining count). | 
|  | 1763 | */ | 
|  | 1764 | ret = actor(desc, page, offset, nr); | 
|  | 1765 | offset += ret; | 
|  | 1766 | index += offset >> PAGE_CACHE_SHIFT; | 
|  | 1767 | offset &= ~PAGE_CACHE_MASK; | 
|  | 1768 |  | 
|  | 1769 | page_cache_release(page); | 
|  | 1770 | if (ret != nr || !desc->count) | 
|  | 1771 | break; | 
|  | 1772 |  | 
|  | 1773 | cond_resched(); | 
|  | 1774 | } | 
|  | 1775 |  | 
|  | 1776 | *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; | 
|  | 1777 | file_accessed(filp); | 
|  | 1778 | } | 
|  | 1779 |  | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1780 | static ssize_t shmem_file_aio_read(struct kiocb *iocb, | 
|  | 1781 | const struct iovec *iov, unsigned long nr_segs, loff_t pos) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1782 | { | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1783 | struct file *filp = iocb->ki_filp; | 
|  | 1784 | ssize_t retval; | 
|  | 1785 | unsigned long seg; | 
|  | 1786 | size_t count; | 
|  | 1787 | loff_t *ppos = &iocb->ki_pos; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1788 |  | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1789 | retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); | 
|  | 1790 | if (retval) | 
|  | 1791 | return retval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1792 |  | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1793 | for (seg = 0; seg < nr_segs; seg++) { | 
|  | 1794 | read_descriptor_t desc; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1795 |  | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 1796 | desc.written = 0; | 
|  | 1797 | desc.arg.buf = iov[seg].iov_base; | 
|  | 1798 | desc.count = iov[seg].iov_len; | 
|  | 1799 | if (desc.count == 0) | 
|  | 1800 | continue; | 
|  | 1801 | desc.error = 0; | 
|  | 1802 | do_shmem_file_read(filp, ppos, &desc, file_read_actor); | 
|  | 1803 | retval += desc.written; | 
|  | 1804 | if (desc.error) { | 
|  | 1805 | retval = retval ?: desc.error; | 
|  | 1806 | break; | 
|  | 1807 | } | 
|  | 1808 | if (desc.count > 0) | 
|  | 1809 | break; | 
|  | 1810 | } | 
|  | 1811 | return retval; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | } | 
|  | 1813 |  | 
| David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1814 | static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | { | 
| David Howells | 726c334 | 2006-06-23 02:02:58 -0700 | [diff] [blame] | 1816 | struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 |  | 
|  | 1818 | buf->f_type = TMPFS_MAGIC; | 
|  | 1819 | buf->f_bsize = PAGE_CACHE_SIZE; | 
|  | 1820 | buf->f_namelen = NAME_MAX; | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1821 | if (sbinfo->max_blocks) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1822 | buf->f_blocks = sbinfo->max_blocks; | 
| Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 1823 | buf->f_bavail = buf->f_bfree = | 
|  | 1824 | sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks); | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 1825 | } | 
|  | 1826 | if (sbinfo->max_inodes) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | buf->f_files = sbinfo->max_inodes; | 
|  | 1828 | buf->f_ffree = sbinfo->free_inodes; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1829 | } | 
|  | 1830 | /* else leave those fields 0 like simple_statfs */ | 
|  | 1831 | return 0; | 
|  | 1832 | } | 
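For a userspace-side view of the fields filled in above, a small illustrative program (not kernel code; assumes a tmpfs mount exists at /dev/shm): statfs(2) reports f_blocks/f_bfree derived from max_blocks and the used_blocks counter, or zero when the mount was created without limits.

#include <stdio.h>
#include <sys/vfs.h>

int main(void)
{
	struct statfs buf;

	if (statfs("/dev/shm", &buf) != 0) {
		perror("statfs");
		return 1;
	}
	printf("bsize=%ld blocks=%ld bfree=%ld files=%ld ffree=%ld\n",
	       (long)buf.f_bsize, (long)buf.f_blocks, (long)buf.f_bfree,
	       (long)buf.f_files, (long)buf.f_ffree);
	return 0;
}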
|  | 1833 |  | 
|  | 1834 | /* | 
|  | 1835 | * File creation. Allocate an inode, and we're done. | 
|  | 1836 | */ | 
|  | 1837 | static int | 
|  | 1838 | shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | 
|  | 1839 | { | 
| Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 1840 | struct inode *inode; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 | int error = -ENOSPC; | 
|  | 1842 |  | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1843 | inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | if (inode) { | 
| Eric Paris | 2a7dba3 | 2011-02-01 11:05:39 -0500 | [diff] [blame] | 1845 | error = security_inode_init_security(inode, dir, | 
|  | 1846 | &dentry->d_name, NULL, | 
|  | 1847 | NULL, NULL); | 
| Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1848 | if (error) { | 
|  | 1849 | if (error != -EOPNOTSUPP) { | 
|  | 1850 | iput(inode); | 
|  | 1851 | return error; | 
|  | 1852 | } | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1853 | } | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 1854 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
|  | 1855 | error = generic_acl_init(inode, dir); | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 1856 | if (error) { | 
|  | 1857 | iput(inode); | 
|  | 1858 | return error; | 
| Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1859 | } | 
| Al Viro | 718deb6 | 2009-12-16 19:35:36 -0500 | [diff] [blame] | 1860 | #else | 
|  | 1861 | error = 0; | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 1862 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | dir->i_size += BOGO_DIRENT_SIZE; | 
|  | 1864 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 
|  | 1865 | d_instantiate(dentry, inode); | 
|  | 1866 | dget(dentry); /* Extra count - pin the dentry in core */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1867 | } | 
|  | 1868 | return error; | 
|  | 1869 | } | 
|  | 1870 |  | 
|  | 1871 | static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 
|  | 1872 | { | 
|  | 1873 | int error; | 
|  | 1874 |  | 
|  | 1875 | if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) | 
|  | 1876 | return error; | 
| Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1877 | inc_nlink(dir); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1878 | return 0; | 
|  | 1879 | } | 
|  | 1880 |  | 
|  | 1881 | static int shmem_create(struct inode *dir, struct dentry *dentry, int mode, | 
|  | 1882 | struct nameidata *nd) | 
|  | 1883 | { | 
|  | 1884 | return shmem_mknod(dir, dentry, mode | S_IFREG, 0); | 
|  | 1885 | } | 
|  | 1886 |  | 
|  | 1887 | /* | 
|  | 1888 | * Link a file. | 
|  | 1889 | */ | 
|  | 1890 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) | 
|  | 1891 | { | 
|  | 1892 | struct inode *inode = old_dentry->d_inode; | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1893 | int ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1894 |  | 
|  | 1895 | /* | 
|  | 1896 | * No ordinary (disk based) filesystem counts links as inodes; | 
|  | 1897 | * but each new link needs a new dentry, pinning lowmem, and | 
|  | 1898 | * tmpfs dentries cannot be pruned until they are unlinked. | 
|  | 1899 | */ | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1900 | ret = shmem_reserve_inode(inode->i_sb); | 
|  | 1901 | if (ret) | 
|  | 1902 | goto out; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1903 |  | 
|  | 1904 | dir->i_size += BOGO_DIRENT_SIZE; | 
|  | 1905 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 
| Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1906 | inc_nlink(inode); | 
| Al Viro | 7de9c6e | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 1907 | ihold(inode);	/* New dentry reference */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1908 | dget(dentry);		/* Extra pinning count for the created dentry */ | 
|  | 1909 | d_instantiate(dentry, inode); | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1910 | out: | 
|  | 1911 | return ret; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | } | 
|  | 1913 |  | 
|  | 1914 | static int shmem_unlink(struct inode *dir, struct dentry *dentry) | 
|  | 1915 | { | 
|  | 1916 | struct inode *inode = dentry->d_inode; | 
|  | 1917 |  | 
| Pavel Emelyanov | 5b04c68 | 2008-02-04 22:28:47 -0800 | [diff] [blame] | 1918 | if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) | 
|  | 1919 | shmem_free_inode(inode->i_sb); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1920 |  | 
|  | 1921 | dir->i_size -= BOGO_DIRENT_SIZE; | 
|  | 1922 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 
| Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1923 | drop_nlink(inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | dput(dentry);	/* Undo the count from "create" - this does all the work */ | 
|  | 1925 | return 0; | 
|  | 1926 | } | 
|  | 1927 |  | 
|  | 1928 | static int shmem_rmdir(struct inode *dir, struct dentry *dentry) | 
|  | 1929 | { | 
|  | 1930 | if (!simple_empty(dentry)) | 
|  | 1931 | return -ENOTEMPTY; | 
|  | 1932 |  | 
| Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1933 | drop_nlink(dentry->d_inode); | 
|  | 1934 | drop_nlink(dir); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1935 | return shmem_unlink(dir, dentry); | 
|  | 1936 | } | 
|  | 1937 |  | 
|  | 1938 | /* | 
|  | 1939 | * The VFS layer already does all the dentry stuff for rename; | 
|  | 1940 | * we just have to decrement the usage count for the target if | 
|  | 1941 | * it exists, so that the VFS layer correctly frees it when it | 
|  | 1942 | * gets overwritten. | 
|  | 1943 | */ | 
|  | 1944 | static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) | 
|  | 1945 | { | 
|  | 1946 | struct inode *inode = old_dentry->d_inode; | 
|  | 1947 | int they_are_dirs = S_ISDIR(inode->i_mode); | 
|  | 1948 |  | 
|  | 1949 | if (!simple_empty(new_dentry)) | 
|  | 1950 | return -ENOTEMPTY; | 
|  | 1951 |  | 
|  | 1952 | if (new_dentry->d_inode) { | 
|  | 1953 | (void) shmem_unlink(new_dir, new_dentry); | 
|  | 1954 | if (they_are_dirs) | 
| Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1955 | drop_nlink(old_dir); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1956 | } else if (they_are_dirs) { | 
| Dave Hansen | 9a53c3a | 2006-09-30 23:29:03 -0700 | [diff] [blame] | 1957 | drop_nlink(old_dir); | 
| Dave Hansen | d8c76e6 | 2006-09-30 23:29:04 -0700 | [diff] [blame] | 1958 | inc_nlink(new_dir); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1959 | } | 
|  | 1960 |  | 
|  | 1961 | old_dir->i_size -= BOGO_DIRENT_SIZE; | 
|  | 1962 | new_dir->i_size += BOGO_DIRENT_SIZE; | 
|  | 1963 | old_dir->i_ctime = old_dir->i_mtime = | 
|  | 1964 | new_dir->i_ctime = new_dir->i_mtime = | 
|  | 1965 | inode->i_ctime = CURRENT_TIME; | 
|  | 1966 | return 0; | 
|  | 1967 | } | 
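|  |  |  | 
|  |  | /* | 
|  |  | * Worked example (illustrative, not from the source): renaming a | 
|  |  | * directory D from parent P1 to parent P2 with no existing target | 
|  |  | * takes the "else if (they_are_dirs)" branch above -- P1 loses the | 
|  |  | * ".." back-reference D contributed (drop_nlink) and P2 gains one | 
|  |  | * (inc_nlink), while D's own i_nlink is untouched.  The matching | 
|  |  | * BOGO_DIRENT_SIZE adjustments keep the parents' nominal directory | 
|  |  | * sizes in step with their entry counts. | 
|  |  | */ | 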
|  | 1968 |  | 
|  | 1969 | static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | 
|  | 1970 | { | 
|  | 1971 | int error; | 
|  | 1972 | int len; | 
|  | 1973 | struct inode *inode; | 
|  | 1974 | struct page *page = NULL; | 
|  | 1975 | char *kaddr; | 
|  | 1976 | struct shmem_inode_info *info; | 
|  | 1977 |  | 
|  | 1978 | len = strlen(symname) + 1; | 
|  | 1979 | if (len > PAGE_CACHE_SIZE) | 
|  | 1980 | return -ENAMETOOLONG; | 
|  | 1981 |  | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 1982 | inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | if (!inode) | 
|  | 1984 | return -ENOSPC; | 
|  | 1985 |  | 
| Eric Paris | 2a7dba3 | 2011-02-01 11:05:39 -0500 | [diff] [blame] | 1986 | error = security_inode_init_security(inode, dir, &dentry->d_name, NULL, | 
|  | 1987 | NULL, NULL); | 
| Stephen Smalley | 570bc1c | 2005-09-09 13:01:43 -0700 | [diff] [blame] | 1988 | if (error) { | 
|  | 1989 | if (error != -EOPNOTSUPP) { | 
|  | 1990 | iput(inode); | 
|  | 1991 | return error; | 
|  | 1992 | } | 
|  | 1993 | error = 0; | 
|  | 1994 | } | 
|  | 1995 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1996 | info = SHMEM_I(inode); | 
|  | 1997 | inode->i_size = len-1; | 
|  | 1998 | if (len <= (char *)inode - (char *)info) { | 
|  | 1999 | /* do it inline */ | 
|  | 2000 | memcpy(info, symname, len); | 
|  | 2001 | inode->i_op = &shmem_symlink_inline_operations; | 
|  | 2002 | } else { | 
|  | 2003 | error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); | 
|  | 2004 | if (error) { | 
|  | 2005 | iput(inode); | 
|  | 2006 | return error; | 
|  | 2007 | } | 
| Hugh Dickins | 14fcc23 | 2008-07-28 15:46:19 -0700 | [diff] [blame] | 2008 | inode->i_mapping->a_ops = &shmem_aops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2009 | inode->i_op = &shmem_symlink_inode_operations; | 
|  | 2010 | kaddr = kmap_atomic(page, KM_USER0); | 
|  | 2011 | memcpy(kaddr, symname, len); | 
|  | 2012 | kunmap_atomic(kaddr, KM_USER0); | 
|  | 2013 | set_page_dirty(page); | 
| Wu Fengguang | 6746aff | 2009-09-16 11:50:14 +0200 | [diff] [blame] | 2014 | unlock_page(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | page_cache_release(page); | 
|  | 2016 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2017 | dir->i_size += BOGO_DIRENT_SIZE; | 
|  | 2018 | dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 
|  | 2019 | d_instantiate(dentry, inode); | 
|  | 2020 | dget(dentry); | 
|  | 2021 | return 0; | 
|  | 2022 | } | 
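|  |  |  | 
|  |  | /* | 
|  |  | * A minimal sketch (hypothetical helper, not part of this file) of | 
|  |  | * the "do it inline" test above, expressed with offsetof().  Since | 
|  |  | * SHMEM_I() returns the shmem_inode_info that embeds the VFS inode, | 
|  |  | * (char *)inode - (char *)info is the room at the front of info that | 
|  |  | * a short symlink target can overlay instead of allocating a page. | 
|  |  | */ | 
|  |  | static inline int shmem_symlink_fits_inline(size_t len) | 
|  |  | { | 
|  |  | /* room before the embedded inode; len includes the trailing NUL */ | 
|  |  | return len <= offsetof(struct shmem_inode_info, vfs_inode); | 
|  |  | } | 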
|  | 2023 |  | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2024 | static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | { | 
|  | 2026 | nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2027 | return NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2028 | } | 
|  | 2029 |  | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2030 | static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2031 | { | 
|  | 2032 | struct page *page = NULL; | 
|  | 2033 | int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); | 
|  | 2034 | nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); | 
| Hugh Dickins | d360244 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2035 | if (page) | 
|  | 2036 | unlock_page(page); | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2037 | return page; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2038 | } | 
|  | 2039 |  | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2040 | static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2041 | { | 
|  | 2042 | if (!IS_ERR(nd_get_link(nd))) { | 
| Linus Torvalds | cc314ee | 2005-08-19 18:02:56 -0700 | [diff] [blame] | 2043 | struct page *page = cookie; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2044 | kunmap(page); | 
|  | 2045 | mark_page_accessed(page); | 
|  | 2046 | page_cache_release(page); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | } | 
|  | 2048 | } | 
|  | 2049 |  | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2050 | static const struct inode_operations shmem_symlink_inline_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2051 | .readlink	= generic_readlink, | 
|  | 2052 | .follow_link	= shmem_follow_link_inline, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2053 | }; | 
|  | 2054 |  | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2055 | static const struct inode_operations shmem_symlink_inode_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2056 | .readlink	= generic_readlink, | 
|  | 2057 | .follow_link	= shmem_follow_link, | 
|  | 2058 | .put_link	= shmem_put_link, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2059 | }; | 
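|  |  |  | 
|  |  | /* | 
|  |  | * A simplified sketch (assumed VFS behaviour, not code from this | 
|  |  | * file) of how the two symlink flavours above are driven: the cookie | 
|  |  | * returned by ->follow_link() is handed back to ->put_link().  That | 
|  |  | * is why shmem_follow_link() returns the kmapped page for later | 
|  |  | * kunmap/release, while the inline variant returns NULL and sets no | 
|  |  | * put_link at all. | 
|  |  | */ | 
|  |  | static void follow_link_protocol_sketch(struct dentry *dentry, struct nameidata *nd) | 
|  |  | { | 
|  |  | const struct inode_operations *iop = dentry->d_inode->i_op; | 
|  |  | void *cookie = iop->follow_link(dentry, nd); | 
|  |  |  | 
|  |  | if (!IS_ERR(cookie)) { | 
|  |  | /* ... the VFS walks the string set via nd_set_link() ... */ | 
|  |  | if (iop->put_link) | 
|  |  | iop->put_link(dentry, nd, cookie); | 
|  |  | } | 
|  |  | } | 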
|  | 2060 |  | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2061 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
| Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 2062 | /* | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2063 | * Superblocks without xattr inode operations will get security.* xattr | 
|  | 2064 | * support from the VFS "for free". As soon as we have any other xattrs | 
|  | 2065 | * like ACLs, we also need to implement the security.* handlers at | 
|  | 2066 | * filesystem level, though. | 
|  | 2067 | */ | 
|  | 2068 |  | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2069 | static size_t shmem_xattr_security_list(struct dentry *dentry, char *list, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2070 | size_t list_len, const char *name, | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2071 | size_t name_len, int handler_flags) | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2072 | { | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2073 | return security_inode_listsecurity(dentry->d_inode, list, list_len); | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2074 | } | 
|  | 2075 |  | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2076 | static int shmem_xattr_security_get(struct dentry *dentry, const char *name, | 
|  | 2077 | void *buffer, size_t size, int handler_flags) | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2078 | { | 
|  | 2079 | if (strcmp(name, "") == 0) | 
|  | 2080 | return -EINVAL; | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2081 | return xattr_getsecurity(dentry->d_inode, name, buffer, size); | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2082 | } | 
|  | 2083 |  | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2084 | static int shmem_xattr_security_set(struct dentry *dentry, const char *name, | 
|  | 2085 | const void *value, size_t size, int flags, int handler_flags) | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2086 | { | 
|  | 2087 | if (strcmp(name, "") == 0) | 
|  | 2088 | return -EINVAL; | 
| Christoph Hellwig | 431547b | 2009-11-13 09:52:56 +0000 | [diff] [blame] | 2089 | return security_inode_setsecurity(dentry->d_inode, name, value, | 
|  | 2090 | size, flags); | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2091 | } | 
|  | 2092 |  | 
| Stephen Hemminger | bb43545 | 2010-05-13 17:53:14 -0700 | [diff] [blame] | 2093 | static const struct xattr_handler shmem_xattr_security_handler = { | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2094 | .prefix = XATTR_SECURITY_PREFIX, | 
|  | 2095 | .list   = shmem_xattr_security_list, | 
|  | 2096 | .get    = shmem_xattr_security_get, | 
|  | 2097 | .set    = shmem_xattr_security_set, | 
|  | 2098 | }; | 
|  | 2099 |  | 
| Stephen Hemminger | bb43545 | 2010-05-13 17:53:14 -0700 | [diff] [blame] | 2100 | static const struct xattr_handler *shmem_xattr_handlers[] = { | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 2101 | &generic_acl_access_handler, | 
|  | 2102 | &generic_acl_default_handler, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2103 | &shmem_xattr_security_handler, | 
|  | 2104 | NULL | 
|  | 2105 | }; | 
|  | 2106 | #endif | 
|  | 2107 |  | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2108 | static struct dentry *shmem_get_parent(struct dentry *child) | 
|  | 2109 | { | 
|  | 2110 | return ERR_PTR(-ESTALE); | 
|  | 2111 | } | 
|  | 2112 |  | 
|  | 2113 | static int shmem_match(struct inode *ino, void *vfh) | 
|  | 2114 | { | 
|  | 2115 | __u32 *fh = vfh; | 
|  | 2116 | __u64 inum = fh[2]; | 
|  | 2117 | inum = (inum << 32) | fh[1]; | 
|  | 2118 | return ino->i_ino == inum && fh[0] == ino->i_generation; | 
|  | 2119 | } | 
|  | 2120 |  | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2121 | static struct dentry *shmem_fh_to_dentry(struct super_block *sb, | 
|  | 2122 | struct fid *fid, int fh_len, int fh_type) | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2123 | { | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2124 | struct inode *inode; | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2125 | struct dentry *dentry = NULL; | 
|  | 2126 | u64 inum = fid->raw[2]; | 
|  | 2127 | inum = (inum << 32) | fid->raw[1]; | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2128 |  | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2129 | if (fh_len < 3) | 
|  | 2130 | return NULL; | 
|  | 2131 |  | 
|  | 2132 | inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), | 
|  | 2133 | shmem_match, fid->raw); | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2134 | if (inode) { | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2135 | dentry = d_find_alias(inode); | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2136 | iput(inode); | 
|  | 2137 | } | 
|  | 2138 |  | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2139 | return dentry; | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2140 | } | 
|  | 2141 |  | 
|  | 2142 | static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, | 
|  | 2143 | int connectable) | 
|  | 2144 | { | 
|  | 2145 | struct inode *inode = dentry->d_inode; | 
|  | 2146 |  | 
| Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 2147 | if (*len < 3) { | 
|  | 2148 | *len = 3; | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2149 | return 255; | 
| Aneesh Kumar K.V | 5fe0c23 | 2011-01-29 18:43:25 +0530 | [diff] [blame] | 2150 | } | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2151 |  | 
| Al Viro | 1d3382c | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 2152 | if (inode_unhashed(inode)) { | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2153 | /* Unfortunately insert_inode_hash is not idempotent, | 
|  | 2154 | * so, as we hash inodes here rather than at creation | 
|  | 2155 | * time, we need a lock to ensure we only try | 
|  | 2156 | * to do it once. | 
|  | 2157 | */ | 
|  | 2158 | static DEFINE_SPINLOCK(lock); | 
|  | 2159 | spin_lock(&lock); | 
| Al Viro | 1d3382c | 2010-10-23 15:19:20 -0400 | [diff] [blame] | 2160 | if (inode_unhashed(inode)) | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2161 | __insert_inode_hash(inode, | 
|  | 2162 | inode->i_ino + inode->i_generation); | 
|  | 2163 | spin_unlock(&lock); | 
|  | 2164 | } | 
|  | 2165 |  | 
|  | 2166 | fh[0] = inode->i_generation; | 
|  | 2167 | fh[1] = inode->i_ino; | 
|  | 2168 | fh[2] = ((__u64)inode->i_ino) >> 32; | 
|  | 2169 |  | 
|  | 2170 | *len = 3; | 
|  | 2171 | return 1; | 
|  | 2172 | } | 
|  | 2173 |  | 
| Christoph Hellwig | 3965516 | 2007-10-21 16:42:17 -0700 | [diff] [blame] | 2174 | static const struct export_operations shmem_export_ops = { | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2175 | .get_parent     = shmem_get_parent, | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2176 | .encode_fh      = shmem_encode_fh, | 
| Christoph Hellwig | 480b116 | 2007-10-21 16:42:13 -0700 | [diff] [blame] | 2177 | .fh_to_dentry	= shmem_fh_to_dentry, | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2178 | }; | 
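|  |  |  | 
|  |  | /* | 
|  |  | * Sketch (hypothetical helper): the 3-word NFS file handle laid out | 
|  |  | * by shmem_encode_fh() above -- fh[0] = i_generation, fh[1] = low 32 | 
|  |  | * bits of i_ino, fh[2] = high 32 bits -- and the decode that | 
|  |  | * shmem_match()/shmem_fh_to_dentry() perform on it. | 
|  |  | */ | 
|  |  | static inline u64 shmem_fh_inum_sketch(const __u32 *fh) | 
|  |  | { | 
|  |  | return ((u64)fh[2] << 32) | fh[1]; /* fh[0] holds the generation */ | 
|  |  | } | 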
|  | 2179 |  | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2180 | static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, | 
|  | 2181 | bool remount) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2182 | { | 
|  | 2183 | char *this_char, *value, *rest; | 
|  | 2184 |  | 
| Hugh Dickins | b00dc3a | 2006-02-21 23:49:47 +0000 | [diff] [blame] | 2185 | while (options != NULL) { | 
|  | 2186 | this_char = options; | 
|  | 2187 | for (;;) { | 
|  | 2188 | /* | 
|  | 2189 | * NUL-terminate this option: unfortunately, | 
|  | 2190 | * mount options form a comma-separated list, | 
|  | 2191 | * but mpol's nodelist may also contain commas. | 
|  | 2192 | */ | 
|  | 2193 | options = strchr(options, ','); | 
|  | 2194 | if (options == NULL) | 
|  | 2195 | break; | 
|  | 2196 | options++; | 
|  | 2197 | if (!isdigit(*options)) { | 
|  | 2198 | options[-1] = '\0'; | 
|  | 2199 | break; | 
|  | 2200 | } | 
|  | 2201 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2202 | if (!*this_char) | 
|  | 2203 | continue; | 
|  | 2204 | if ((value = strchr(this_char,'=')) != NULL) { | 
|  | 2205 | *value++ = 0; | 
|  | 2206 | } else { | 
|  | 2207 | printk(KERN_ERR | 
|  | 2208 | "tmpfs: No value for mount option '%s'\n", | 
|  | 2209 | this_char); | 
|  | 2210 | return 1; | 
|  | 2211 | } | 
|  | 2212 |  | 
|  | 2213 | if (!strcmp(this_char,"size")) { | 
|  | 2214 | unsigned long long size; | 
|  | 2215 | size = memparse(value,&rest); | 
|  | 2216 | if (*rest == '%') { | 
|  | 2217 | size <<= PAGE_SHIFT; | 
|  | 2218 | size *= totalram_pages; | 
|  | 2219 | do_div(size, 100); | 
|  | 2220 | rest++; | 
|  | 2221 | } | 
|  | 2222 | if (*rest) | 
|  | 2223 | goto bad_val; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2224 | sbinfo->max_blocks = | 
|  | 2225 | DIV_ROUND_UP(size, PAGE_CACHE_SIZE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2226 | } else if (!strcmp(this_char,"nr_blocks")) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2227 | sbinfo->max_blocks = memparse(value, &rest); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2228 | if (*rest) | 
|  | 2229 | goto bad_val; | 
|  | 2230 | } else if (!strcmp(this_char,"nr_inodes")) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2231 | sbinfo->max_inodes = memparse(value, &rest); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2232 | if (*rest) | 
|  | 2233 | goto bad_val; | 
|  | 2234 | } else if (!strcmp(this_char,"mode")) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2235 | if (remount) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2236 | continue; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2237 | sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2238 | if (*rest) | 
|  | 2239 | goto bad_val; | 
|  | 2240 | } else if (!strcmp(this_char,"uid")) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2241 | if (remount) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2242 | continue; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2243 | sbinfo->uid = simple_strtoul(value, &rest, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2244 | if (*rest) | 
|  | 2245 | goto bad_val; | 
|  | 2246 | } else if (!strcmp(this_char,"gid")) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2247 | if (remount) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2248 | continue; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2249 | sbinfo->gid = simple_strtoul(value, &rest, 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2250 | if (*rest) | 
|  | 2251 | goto bad_val; | 
| Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 2252 | } else if (!strcmp(this_char,"mpol")) { | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2253 | if (mpol_parse_str(value, &sbinfo->mpol, 1)) | 
| Robin Holt | 7339ff8 | 2006-01-14 13:20:48 -0800 | [diff] [blame] | 2254 | goto bad_val; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2255 | } else { | 
|  | 2256 | printk(KERN_ERR "tmpfs: Bad mount option %s\n", | 
|  | 2257 | this_char); | 
|  | 2258 | return 1; | 
|  | 2259 | } | 
|  | 2260 | } | 
|  | 2261 | return 0; | 
|  | 2262 |  | 
|  | 2263 | bad_val: | 
|  | 2264 | printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", | 
|  | 2265 | value, this_char); | 
|  | 2266 | return 1; | 
|  | 2267 |  | 
|  | 2268 | } | 
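|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative) of the option grammar parsed above: | 
|  |  | * | 
|  |  | * mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777 tmpfs /mnt | 
|  |  | * | 
|  |  | * memparse() gives size/nr_blocks/nr_inodes their k/m/g suffixes, the | 
|  |  | * '%' branch scales "size" against totalram_pages, and the isdigit() | 
|  |  | * test in the NUL-termination loop is what lets an mpol nodelist such | 
|  |  | * as "mpol=bind:0-3,5" keep its internal commas. | 
|  |  | */ | 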
|  | 2269 |  | 
|  | 2270 | static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) | 
|  | 2271 | { | 
|  | 2272 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2273 | struct shmem_sb_info config = *sbinfo; | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2274 | unsigned long inodes; | 
|  | 2275 | int error = -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2276 |  | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2277 | if (shmem_parse_options(data, &config, true)) | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2278 | return error; | 
|  | 2279 |  | 
|  | 2280 | spin_lock(&sbinfo->stat_lock); | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2281 | inodes = sbinfo->max_inodes - sbinfo->free_inodes; | 
| Tim Chen | 7e49629 | 2010-08-09 17:19:05 -0700 | [diff] [blame] | 2282 | if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2283 | goto out; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2284 | if (config.max_inodes < inodes) | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2285 | goto out; | 
|  | 2286 | /* | 
|  | 2287 | * Those tests also disallow limited->unlimited while any are in | 
|  | 2288 | * use, so i_blocks will always be zero when max_blocks is zero; | 
|  | 2289 | * but we must separately disallow unlimited->limited, because | 
|  | 2290 | * in that case we have no record of how much is already in use. | 
|  | 2291 | */ | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2292 | if (config.max_blocks && !sbinfo->max_blocks) | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2293 | goto out; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2294 | if (config.max_inodes && !sbinfo->max_inodes) | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2295 | goto out; | 
|  | 2296 |  | 
|  | 2297 | error = 0; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2298 | sbinfo->max_blocks  = config.max_blocks; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2299 | sbinfo->max_inodes  = config.max_inodes; | 
|  | 2300 | sbinfo->free_inodes = config.max_inodes - inodes; | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2301 |  | 
|  | 2302 | mpol_put(sbinfo->mpol); | 
|  | 2303 | sbinfo->mpol        = config.mpol;	/* transfers initial ref */ | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2304 | out: | 
|  | 2305 | spin_unlock(&sbinfo->stat_lock); | 
|  | 2306 | return error; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2307 | } | 
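|  |  |  | 
|  |  | /* | 
|  |  | * Example (illustrative) of the rules enforced above: an instance | 
|  |  | * mounted with size=100m may be grown, or shrunk to anything at or | 
|  |  | * above its current usage; remounting it to size=0 (unlimited) | 
|  |  | * succeeds only while nothing is in use; and an unlimited instance | 
|  |  | * can never be remounted to a finite size, since used_blocks was | 
|  |  | * never maintained for it. | 
|  |  | */ | 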
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2308 |  | 
|  | 2309 | static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs) | 
|  | 2310 | { | 
|  | 2311 | struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb); | 
|  | 2312 |  | 
|  | 2313 | if (sbinfo->max_blocks != shmem_default_max_blocks()) | 
|  | 2314 | seq_printf(seq, ",size=%luk", | 
|  | 2315 | sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); | 
|  | 2316 | if (sbinfo->max_inodes != shmem_default_max_inodes()) | 
|  | 2317 | seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); | 
|  | 2318 | if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) | 
|  | 2319 | seq_printf(seq, ",mode=%03o", sbinfo->mode); | 
|  | 2320 | if (sbinfo->uid != 0) | 
|  | 2321 | seq_printf(seq, ",uid=%u", sbinfo->uid); | 
|  | 2322 | if (sbinfo->gid != 0) | 
|  | 2323 | seq_printf(seq, ",gid=%u", sbinfo->gid); | 
| Lee Schermerhorn | 71fe804 | 2008-04-28 02:13:26 -0700 | [diff] [blame] | 2324 | shmem_show_mpol(seq, sbinfo->mpol); | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2325 | return 0; | 
|  | 2326 | } | 
|  | 2327 | #endif /* CONFIG_TMPFS */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2328 |  | 
|  | 2329 | static void shmem_put_super(struct super_block *sb) | 
|  | 2330 | { | 
| Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 2331 | struct shmem_sb_info *sbinfo = SHMEM_SB(sb); | 
|  | 2332 |  | 
|  | 2333 | percpu_counter_destroy(&sbinfo->used_blocks); | 
|  | 2334 | kfree(sbinfo); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2335 | sb->s_fs_info = NULL; | 
|  | 2336 | } | 
|  | 2337 |  | 
| Kay Sievers | 2b2af54 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 2338 | int shmem_fill_super(struct super_block *sb, void *data, int silent) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2339 | { | 
|  | 2340 | struct inode *inode; | 
|  | 2341 | struct dentry *root; | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2342 | struct shmem_sb_info *sbinfo; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2343 | int err = -ENOMEM; | 
|  | 2344 |  | 
|  | 2345 | /* Round up to L1_CACHE_BYTES to resist false sharing */ | 
| Pekka Enberg | 425fbf0 | 2009-09-21 17:03:50 -0700 | [diff] [blame] | 2346 | sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2347 | L1_CACHE_BYTES), GFP_KERNEL); | 
|  | 2348 | if (!sbinfo) | 
|  | 2349 | return -ENOMEM; | 
|  | 2350 |  | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2351 | sbinfo->mode = S_IRWXUGO | S_ISVTX; | 
| David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 2352 | sbinfo->uid = current_fsuid(); | 
|  | 2353 | sbinfo->gid = current_fsgid(); | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2354 | sb->s_fs_info = sbinfo; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2355 |  | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2356 | #ifdef CONFIG_TMPFS | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2357 | /* | 
|  | 2358 | * By default we allow only half of the physical RAM per | 
|  | 2359 | * tmpfs instance, limiting inodes to one per page of lowmem; | 
|  | 2360 | * but the internal instance is left unlimited. | 
|  | 2361 | */ | 
|  | 2362 | if (!(sb->s_flags & MS_NOUSER)) { | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2363 | sbinfo->max_blocks = shmem_default_max_blocks(); | 
|  | 2364 | sbinfo->max_inodes = shmem_default_max_inodes(); | 
|  | 2365 | if (shmem_parse_options(data, sbinfo, false)) { | 
|  | 2366 | err = -EINVAL; | 
|  | 2367 | goto failed; | 
|  | 2368 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2369 | } | 
| David M. Grimes | 91828a4 | 2006-10-17 00:09:45 -0700 | [diff] [blame] | 2370 | sb->s_export_op = &shmem_export_ops; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2371 | #else | 
|  | 2372 | sb->s_flags |= MS_NOUSER; | 
|  | 2373 | #endif | 
|  | 2374 |  | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2375 | spin_lock_init(&sbinfo->stat_lock); | 
| Hugh Dickins | 602586a | 2010-08-17 15:23:56 -0700 | [diff] [blame] | 2376 | if (percpu_counter_init(&sbinfo->used_blocks, 0)) | 
|  | 2377 | goto failed; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2378 | sbinfo->free_inodes = sbinfo->max_inodes; | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2379 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2380 | sb->s_maxbytes = SHMEM_MAX_BYTES; | 
|  | 2381 | sb->s_blocksize = PAGE_CACHE_SIZE; | 
|  | 2382 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | 
|  | 2383 | sb->s_magic = TMPFS_MAGIC; | 
|  | 2384 | sb->s_op = &shmem_ops; | 
| Robin H. Johnson | cfd95a9 | 2006-06-12 21:50:25 +0100 | [diff] [blame] | 2385 | sb->s_time_gran = 1; | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2386 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
|  | 2387 | sb->s_xattr = shmem_xattr_handlers; | 
|  | 2388 | sb->s_flags |= MS_POSIXACL; | 
|  | 2389 | #endif | 
| Hugh Dickins | 0edd73b | 2005-06-21 17:15:04 -0700 | [diff] [blame] | 2390 |  | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2391 | inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2392 | if (!inode) | 
|  | 2393 | goto failed; | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2394 | inode->i_uid = sbinfo->uid; | 
|  | 2395 | inode->i_gid = sbinfo->gid; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2396 | root = d_alloc_root(inode); | 
|  | 2397 | if (!root) | 
|  | 2398 | goto failed_iput; | 
|  | 2399 | sb->s_root = root; | 
|  | 2400 | return 0; | 
|  | 2401 |  | 
|  | 2402 | failed_iput: | 
|  | 2403 | iput(inode); | 
|  | 2404 | failed: | 
|  | 2405 | shmem_put_super(sb); | 
|  | 2406 | return err; | 
|  | 2407 | } | 
|  | 2408 |  | 
| Pekka Enberg | fcc234f | 2006-03-22 00:08:13 -0800 | [diff] [blame] | 2409 | static struct kmem_cache *shmem_inode_cachep; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2410 |  | 
|  | 2411 | static struct inode *shmem_alloc_inode(struct super_block *sb) | 
|  | 2412 | { | 
|  | 2413 | struct shmem_inode_info *p; | 
| Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 2414 | p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2415 | if (!p) | 
|  | 2416 | return NULL; | 
|  | 2417 | return &p->vfs_inode; | 
|  | 2418 | } | 
|  | 2419 |  | 
| Nick Piggin | fa0d7e3 | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 2420 | static void shmem_i_callback(struct rcu_head *head) | 
|  | 2421 | { | 
|  | 2422 | struct inode *inode = container_of(head, struct inode, i_rcu); | 
|  | 2423 | INIT_LIST_HEAD(&inode->i_dentry); | 
|  | 2424 | kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); | 
|  | 2425 | } | 
|  | 2426 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2427 | static void shmem_destroy_inode(struct inode *inode) | 
|  | 2428 | { | 
|  | 2429 | if ((inode->i_mode & S_IFMT) == S_IFREG) { | 
|  | 2430 | /* only struct inode is valid if it's an inline symlink */ | 
|  | 2431 | mpol_free_shared_policy(&SHMEM_I(inode)->policy); | 
|  | 2432 | } | 
| Nick Piggin | fa0d7e3 | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 2433 | call_rcu(&inode->i_rcu, shmem_i_callback); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2434 | } | 
|  | 2435 |  | 
| Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 2436 | static void init_once(void *foo) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2437 | { | 
|  | 2438 | struct shmem_inode_info *p = (struct shmem_inode_info *) foo; | 
|  | 2439 |  | 
| Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 2440 | inode_init_once(&p->vfs_inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2441 | } | 
|  | 2442 |  | 
|  | 2443 | static int init_inodecache(void) | 
|  | 2444 | { | 
|  | 2445 | shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", | 
|  | 2446 | sizeof(struct shmem_inode_info), | 
| Alexey Dobriyan | 040b5c6 | 2007-10-16 23:26:10 -0700 | [diff] [blame] | 2447 | 0, SLAB_PANIC, init_once); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2448 | return 0; | 
|  | 2449 | } | 
|  | 2450 |  | 
|  | 2451 | static void destroy_inodecache(void) | 
|  | 2452 | { | 
| Alexey Dobriyan | 1a1d92c | 2006-09-27 01:49:40 -0700 | [diff] [blame] | 2453 | kmem_cache_destroy(shmem_inode_cachep); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2454 | } | 
|  | 2455 |  | 
| Christoph Hellwig | f5e54d6 | 2006-06-28 04:26:44 -0700 | [diff] [blame] | 2456 | static const struct address_space_operations shmem_aops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2457 | .writepage	= shmem_writepage, | 
| Ken Chen | 7671932 | 2007-02-10 01:43:15 -0800 | [diff] [blame] | 2458 | .set_page_dirty	= __set_page_dirty_no_writeback, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2459 | #ifdef CONFIG_TMPFS | 
| Hugh Dickins | ae97641 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 2460 | .readpage	= shmem_readpage, | 
| Nick Piggin | 800d15a | 2007-10-16 01:25:03 -0700 | [diff] [blame] | 2461 | .write_begin	= shmem_write_begin, | 
|  | 2462 | .write_end	= shmem_write_end, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2463 | #endif | 
| Lee Schermerhorn | 304dbdb | 2006-04-22 02:35:48 -0700 | [diff] [blame] | 2464 | .migratepage	= migrate_page, | 
| Andi Kleen | aa261f5 | 2009-09-16 11:50:16 +0200 | [diff] [blame] | 2465 | .error_remove_page = generic_error_remove_page, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2466 | }; | 
|  | 2467 |  | 
| Helge Deller | 15ad7cd | 2006-12-06 20:40:36 -0800 | [diff] [blame] | 2468 | static const struct file_operations shmem_file_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2469 | .mmap		= shmem_mmap, | 
|  | 2470 | #ifdef CONFIG_TMPFS | 
|  | 2471 | .llseek		= generic_file_llseek, | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 2472 | .read		= do_sync_read, | 
| Hugh Dickins | 5402b97 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2473 | .write		= do_sync_write, | 
| Hugh Dickins | bcd78e4 | 2008-07-23 21:27:35 -0700 | [diff] [blame] | 2474 | .aio_read	= shmem_file_aio_read, | 
| Hugh Dickins | 5402b97 | 2008-02-04 22:28:44 -0800 | [diff] [blame] | 2475 | .aio_write	= generic_file_aio_write, | 
| Christoph Hellwig | 1b061d9 | 2010-05-26 17:53:41 +0200 | [diff] [blame] | 2476 | .fsync		= noop_fsync, | 
| Hugh Dickins | ae97641 | 2007-06-04 10:00:39 +0200 | [diff] [blame] | 2477 | .splice_read	= generic_file_splice_read, | 
|  | 2478 | .splice_write	= generic_file_splice_write, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2479 | #endif | 
|  | 2480 | }; | 
|  | 2481 |  | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2482 | static const struct inode_operations shmem_inode_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2483 | .setattr	= shmem_notify_change, | 
| Badari Pulavarty | f6b3ec2 | 2006-01-06 00:10:38 -0800 | [diff] [blame] | 2484 | .truncate_range	= shmem_truncate_range, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2485 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
|  | 2486 | .setxattr	= generic_setxattr, | 
|  | 2487 | .getxattr	= generic_getxattr, | 
|  | 2488 | .listxattr	= generic_listxattr, | 
|  | 2489 | .removexattr	= generic_removexattr, | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 2490 | .check_acl	= generic_check_acl, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2491 | #endif | 
|  | 2492 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | }; | 
|  | 2494 |  | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2495 | static const struct inode_operations shmem_dir_inode_operations = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2496 | #ifdef CONFIG_TMPFS | 
|  | 2497 | .create		= shmem_create, | 
|  | 2498 | .lookup		= simple_lookup, | 
|  | 2499 | .link		= shmem_link, | 
|  | 2500 | .unlink		= shmem_unlink, | 
|  | 2501 | .symlink	= shmem_symlink, | 
|  | 2502 | .mkdir		= shmem_mkdir, | 
|  | 2503 | .rmdir		= shmem_rmdir, | 
|  | 2504 | .mknod		= shmem_mknod, | 
|  | 2505 | .rename		= shmem_rename, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2506 | #endif | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2507 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
|  | 2508 | .setattr	= shmem_notify_change, | 
|  | 2509 | .setxattr	= generic_setxattr, | 
|  | 2510 | .getxattr	= generic_getxattr, | 
|  | 2511 | .listxattr	= generic_listxattr, | 
|  | 2512 | .removexattr	= generic_removexattr, | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 2513 | .check_acl	= generic_check_acl, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2514 | #endif | 
|  | 2515 | }; | 
|  | 2516 |  | 
| Arjan van de Ven | 92e1d5b | 2007-02-12 00:55:39 -0800 | [diff] [blame] | 2517 | static const struct inode_operations shmem_special_inode_operations = { | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2518 | #ifdef CONFIG_TMPFS_POSIX_ACL | 
|  | 2519 | .setattr	= shmem_notify_change, | 
|  | 2520 | .setxattr	= generic_setxattr, | 
|  | 2521 | .getxattr	= generic_getxattr, | 
|  | 2522 | .listxattr	= generic_listxattr, | 
|  | 2523 | .removexattr	= generic_removexattr, | 
| Christoph Hellwig | 1c7c474 | 2009-11-03 16:44:44 +0100 | [diff] [blame] | 2524 | .check_acl	= generic_check_acl, | 
| Andreas Gruenbacher | 39f0247 | 2006-09-29 02:01:35 -0700 | [diff] [blame] | 2525 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2526 | }; | 
|  | 2527 |  | 
| Hugh Dickins | 759b977 | 2007-03-05 00:30:28 -0800 | [diff] [blame] | 2528 | static const struct super_operations shmem_ops = { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2529 | .alloc_inode	= shmem_alloc_inode, | 
|  | 2530 | .destroy_inode	= shmem_destroy_inode, | 
|  | 2531 | #ifdef CONFIG_TMPFS | 
|  | 2532 | .statfs		= shmem_statfs, | 
|  | 2533 | .remount_fs	= shmem_remount_fs, | 
| akpm@linux-foundation.org | 680d794 | 2008-02-08 04:21:48 -0800 | [diff] [blame] | 2534 | .show_options	= shmem_show_options, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2535 | #endif | 
| Al Viro | 1f895f7 | 2010-06-05 19:10:41 -0400 | [diff] [blame] | 2536 | .evict_inode	= shmem_evict_inode, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2537 | .drop_inode	= generic_delete_inode, | 
|  | 2538 | .put_super	= shmem_put_super, | 
|  | 2539 | }; | 
|  | 2540 |  | 
| Alexey Dobriyan | f0f37e2 | 2009-09-27 22:29:37 +0400 | [diff] [blame] | 2541 | static const struct vm_operations_struct shmem_vm_ops = { | 
| Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 2542 | .fault		= shmem_fault, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2543 | #ifdef CONFIG_NUMA | 
|  | 2544 | .set_policy     = shmem_set_policy, | 
|  | 2545 | .get_policy     = shmem_get_policy, | 
|  | 2546 | #endif | 
|  | 2547 | }; | 
|  | 2548 |  | 
|  | 2549 |  | 
| Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2550 | static struct dentry *shmem_mount(struct file_system_type *fs_type, | 
|  | 2551 | int flags, const char *dev_name, void *data) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2552 | { | 
| Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2553 | return mount_nodev(fs_type, flags, data, shmem_fill_super); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2554 | } | 
|  | 2555 |  | 
|  | 2556 | static struct file_system_type tmpfs_fs_type = { | 
|  | 2557 | .owner		= THIS_MODULE, | 
|  | 2558 | .name		= "tmpfs", | 
| Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2559 | .mount		= shmem_mount, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2560 | .kill_sb	= kill_litter_super, | 
|  | 2561 | }; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2562 |  | 
| Kay Sievers | 2b2af54 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 2563 | int __init init_tmpfs(void) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2564 | { | 
|  | 2565 | int error; | 
|  | 2566 |  | 
| Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 2567 | error = bdi_init(&shmem_backing_dev_info); | 
|  | 2568 | if (error) | 
|  | 2569 | goto out4; | 
|  | 2570 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2571 | error = init_inodecache(); | 
|  | 2572 | if (error) | 
|  | 2573 | goto out3; | 
|  | 2574 |  | 
|  | 2575 | error = register_filesystem(&tmpfs_fs_type); | 
|  | 2576 | if (error) { | 
|  | 2577 | printk(KERN_ERR "Could not register tmpfs\n"); | 
|  | 2578 | goto out2; | 
|  | 2579 | } | 
| Greg Kroah-Hartman | 95dc112 | 2005-06-20 21:15:16 -0700 | [diff] [blame] | 2580 |  | 
| Trond Myklebust | 1f5ce9e | 2006-06-09 09:34:16 -0400 | [diff] [blame] | 2581 | shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER, | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2582 | tmpfs_fs_type.name, NULL); | 
|  | 2583 | if (IS_ERR(shm_mnt)) { | 
|  | 2584 | error = PTR_ERR(shm_mnt); | 
|  | 2585 | printk(KERN_ERR "Could not kern_mount tmpfs\n"); | 
|  | 2586 | goto out1; | 
|  | 2587 | } | 
|  | 2588 | return 0; | 
|  | 2589 |  | 
|  | 2590 | out1: | 
|  | 2591 | unregister_filesystem(&tmpfs_fs_type); | 
|  | 2592 | out2: | 
|  | 2593 | destroy_inodecache(); | 
|  | 2594 | out3: | 
| Peter Zijlstra | e0bf68d | 2007-10-16 23:25:46 -0700 | [diff] [blame] | 2595 | bdi_destroy(&shmem_backing_dev_info); | 
|  | 2596 | out4: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2597 | shm_mnt = ERR_PTR(error); | 
|  | 2598 | return error; | 
|  | 2599 | } | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2600 |  | 
| Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2601 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
|  | 2602 | /** | 
|  | 2603 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file | 
|  | 2604 | * @inode: the inode to be searched | 
|  | 2605 | * @pgoff: the offset to be searched | 
|  | 2606 | * @pagep: the pointer for the found page to be stored | 
|  | 2607 | * @ent: the pointer for the found swap entry to be stored | 
|  | 2608 | * | 
|  | 2609 | * If a page is found, its refcount is incremented.  The caller must | 
|  | 2610 | * then drop that reference (e.g. with page_cache_release()). | 
|  | 2611 | */ | 
|  | 2612 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | 
|  | 2613 | struct page **pagep, swp_entry_t *ent) | 
|  | 2614 | { | 
|  | 2615 | swp_entry_t entry = { .val = 0 }, *ptr; | 
|  | 2616 | struct page *page = NULL; | 
|  | 2617 | struct shmem_inode_info *info = SHMEM_I(inode); | 
|  | 2618 |  | 
|  | 2619 | if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 
|  | 2620 | goto out; | 
|  | 2621 |  | 
|  | 2622 | spin_lock(&info->lock); | 
|  | 2623 | ptr = shmem_swp_entry(info, pgoff, NULL); | 
|  | 2624 | #ifdef CONFIG_SWAP | 
|  | 2625 | if (ptr && ptr->val) { | 
|  | 2626 | entry.val = ptr->val; | 
|  | 2627 | page = find_get_page(&swapper_space, entry.val); | 
|  | 2628 | } else | 
|  | 2629 | #endif | 
|  | 2630 | page = find_get_page(inode->i_mapping, pgoff); | 
|  | 2631 | if (ptr) | 
|  | 2632 | shmem_swp_unmap(ptr); | 
|  | 2633 | spin_unlock(&info->lock); | 
|  | 2634 | out: | 
|  | 2635 | *pagep = page; | 
|  | 2636 | *ent = entry; | 
|  | 2637 | } | 
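|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative caller sketch (hypothetical, not kernel code).  When | 
|  |  | * ent->val is set the data lives in swap (and *pagep, if non-NULL, | 
|  |  | * is the swap-cache page); otherwise *pagep is the page-cache page. | 
|  |  | * Any page returned carries a reference the caller must drop. | 
|  |  | */ | 
|  |  | static void shmem_target_usage_sketch(struct inode *inode, pgoff_t pgoff) | 
|  |  | { | 
|  |  | struct page *page; | 
|  |  | swp_entry_t ent; | 
|  |  |  | 
|  |  | mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent); | 
|  |  | if (page) | 
|  |  | page_cache_release(page); /* drop the reference taken for us */ | 
|  |  | } | 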
|  | 2638 | #endif | 
|  | 2639 |  | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2640 | #else /* !CONFIG_SHMEM */ | 
|  | 2641 |  | 
|  | 2642 | /* | 
|  | 2643 | * tiny-shmem: simple shmemfs and tmpfs using ramfs code | 
|  | 2644 | * | 
|  | 2645 | * This is intended for small systems where the benefits of the full | 
|  | 2646 | * shmem code (swap-backed and resource-limited) are outweighed by | 
|  | 2647 | * their complexity. On systems without swap this code should be | 
|  | 2648 | * effectively equivalent, but much lighter weight. | 
|  | 2649 | */ | 
|  | 2650 |  | 
|  | 2651 | #include <linux/ramfs.h> | 
|  | 2652 |  | 
|  | 2653 | static struct file_system_type tmpfs_fs_type = { | 
|  | 2654 | .name		= "tmpfs", | 
| Al Viro | 3c26ff6 | 2010-07-25 11:46:36 +0400 | [diff] [blame] | 2655 | .mount		= ramfs_mount, | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2656 | .kill_sb	= kill_litter_super, | 
|  | 2657 | }; | 
|  | 2658 |  | 
| Kay Sievers | 2b2af54 | 2009-04-30 15:23:42 +0200 | [diff] [blame] | 2659 | int __init init_tmpfs(void) | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2660 | { | 
|  | 2661 | BUG_ON(register_filesystem(&tmpfs_fs_type) != 0); | 
|  | 2662 |  | 
|  | 2663 | shm_mnt = kern_mount(&tmpfs_fs_type); | 
|  | 2664 | BUG_ON(IS_ERR(shm_mnt)); | 
|  | 2665 |  | 
|  | 2666 | return 0; | 
|  | 2667 | } | 
|  | 2668 |  | 
|  | 2669 | int shmem_unuse(swp_entry_t entry, struct page *page) | 
|  | 2670 | { | 
|  | 2671 | return 0; | 
|  | 2672 | } | 
|  | 2673 |  | 
| Hugh Dickins | 3f96b79 | 2009-09-21 17:03:37 -0700 | [diff] [blame] | 2674 | int shmem_lock(struct file *file, int lock, struct user_struct *user) | 
|  | 2675 | { | 
|  | 2676 | return 0; | 
|  | 2677 | } | 
|  | 2678 |  | 
| Daisuke Nishimura | 87946a7 | 2010-05-26 14:42:39 -0700 | [diff] [blame] | 2679 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 
|  | 2680 | /** | 
|  | 2681 | * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file | 
|  | 2682 | * @inode: the inode to be searched | 
|  | 2683 | * @pgoff: the offset to be searched | 
|  | 2684 | * @pagep: the pointer for the found page to be stored | 
|  | 2685 | * @ent: the pointer for the found swap entry to be stored | 
|  | 2686 | * | 
|  | 2687 | * If a page is found, its refcount is incremented.  The caller must | 
|  | 2688 | * then drop that reference (e.g. with page_cache_release()). | 
|  | 2689 | */ | 
|  | 2690 | void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff, | 
|  | 2691 | struct page **pagep, swp_entry_t *ent) | 
|  | 2692 | { | 
|  | 2693 | struct page *page = NULL; | 
|  | 2694 |  | 
|  | 2695 | if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode)) | 
|  | 2696 | goto out; | 
|  | 2697 | page = find_get_page(inode->i_mapping, pgoff); | 
|  | 2698 | out: | 
|  | 2699 | *pagep = page; | 
|  | 2700 | *ent = (swp_entry_t){ .val = 0 }; | 
|  | 2701 | } | 
|  | 2702 | #endif | 
|  | 2703 |  | 
| Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2704 | #define shmem_vm_ops				generic_file_vm_ops | 
|  | 2705 | #define shmem_file_operations			ramfs_file_operations | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2706 | #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev) | 
| Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2707 | #define shmem_acct_size(flags, size)		0 | 
|  | 2708 | #define shmem_unacct_size(flags, size)		do {} while (0) | 
| Hugh Dickins | caefba1 | 2009-04-13 14:40:12 -0700 | [diff] [blame] | 2709 | #define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2710 |  | 
|  | 2711 | #endif /* CONFIG_SHMEM */ | 
|  | 2712 |  | 
|  | 2713 | /* common code */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2714 |  | 
| Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 2715 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2716 | * shmem_file_setup - get an unlinked file living in tmpfs | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2717 | * @name: name for dentry (to be seen in /proc/<pid>/maps) | 
|  | 2718 | * @size: size to be set for the file | 
| Hugh Dickins | 0b0a080 | 2009-02-24 20:51:52 +0000 | [diff] [blame] | 2719 | * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2720 | */ | 
| Sergei Trofimovich | 168f5ac | 2009-06-16 15:33:02 -0700 | [diff] [blame] | 2721 | struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2722 | { | 
|  | 2723 | int error; | 
|  | 2724 | struct file *file; | 
|  | 2725 | struct inode *inode; | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2726 | struct path path; | 
|  | 2727 | struct dentry *root; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2728 | struct qstr this; | 
|  | 2729 |  | 
|  | 2730 | if (IS_ERR(shm_mnt)) | 
|  | 2731 | return (void *)shm_mnt; | 
|  | 2732 |  | 
|  | 2733 | if (size < 0 || size > SHMEM_MAX_BYTES) | 
|  | 2734 | return ERR_PTR(-EINVAL); | 
|  | 2735 |  | 
|  | 2736 | if (shmem_acct_size(flags, size)) | 
|  | 2737 | return ERR_PTR(-ENOMEM); | 
|  | 2738 |  | 
|  | 2739 | error = -ENOMEM; | 
|  | 2740 | this.name = name; | 
|  | 2741 | this.len = strlen(name); | 
|  | 2742 | this.hash = 0; /* will go */ | 
|  | 2743 | root = shm_mnt->mnt_root; | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2744 | path.dentry = d_alloc(root, &this); | 
|  | 2745 | if (!path.dentry) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2746 | goto put_memory; | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2747 | path.mnt = mntget(shm_mnt); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2748 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2749 | error = -ENOSPC; | 
| Dmitry Monakhov | 454abaf | 2010-03-04 17:32:18 +0300 | [diff] [blame] | 2750 | inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2751 | if (!inode) | 
| Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2752 | goto put_dentry; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2753 |  | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2754 | d_instantiate(path.dentry, inode); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2755 | inode->i_size = size; | 
|  | 2756 | inode->i_nlink = 0;	/* It is unlinked */ | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2757 | #ifndef CONFIG_MMU | 
|  | 2758 | error = ramfs_nommu_expand_for_mapping(inode, size); | 
|  | 2759 | if (error) | 
| Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2760 | goto put_dentry; | 
| Matt Mackall | 853ac43 | 2009-01-06 14:40:20 -0800 | [diff] [blame] | 2761 | #endif | 
| Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2762 |  | 
|  | 2763 | error = -ENFILE; | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2764 | file = alloc_file(&path, FMODE_WRITE | FMODE_READ, | 
| Al Viro | 4b42af8 | 2009-08-05 18:25:56 +0400 | [diff] [blame] | 2765 | &shmem_file_operations); | 
|  | 2766 | if (!file) | 
|  | 2767 | goto put_dentry; | 
|  | 2768 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2769 | return file; | 
|  | 2770 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2771 | put_dentry: | 
| Al Viro | 2c48b9c | 2009-08-09 00:52:35 +0400 | [diff] [blame] | 2772 | path_put(&path); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2773 | put_memory: | 
|  | 2774 | shmem_unacct_size(flags, size); | 
|  | 2775 | return ERR_PTR(error); | 
|  | 2776 | } | 
| Keith Packard | 395e0dd | 2008-06-20 00:08:06 -0700 | [diff] [blame] | 2777 | EXPORT_SYMBOL_GPL(shmem_file_setup); | 
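|  |  |  | 
|  |  | /* | 
|  |  | * A minimal sketch (hypothetical caller, assumed name): obtaining an | 
|  |  | * unlinked, swap-backed file of a given size, as in-kernel users of | 
|  |  | * shmem_file_setup() such as SysV shared memory do.  The error | 
|  |  | * pointers mirror the failure paths above: -EINVAL, -ENOMEM, -ENOSPC | 
|  |  | * or -ENFILE. | 
|  |  | */ | 
|  |  | static struct file *shmem_buffer_example(loff_t size) | 
|  |  | { | 
|  |  | struct file *filp = shmem_file_setup("example-buf", size, VM_NORESERVE); | 
|  |  |  | 
|  |  | if (IS_ERR(filp)) | 
|  |  | return filp; | 
|  |  | /* use filp->f_mapping or mmap it; fput(filp) when finished */ | 
|  |  | return filp; | 
|  |  | } | 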
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2778 |  | 
| Randy Dunlap | 4671181 | 2008-03-19 17:00:41 -0700 | [diff] [blame] | 2779 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2780 | * shmem_zero_setup - setup a shared anonymous mapping | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2781 | * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff | 
|  | 2782 | */ | 
|  | 2783 | int shmem_zero_setup(struct vm_area_struct *vma) | 
|  | 2784 | { | 
|  | 2785 | struct file *file; | 
|  | 2786 | loff_t size = vma->vm_end - vma->vm_start; | 
|  | 2787 |  | 
|  | 2788 | file = shmem_file_setup("dev/zero", size, vma->vm_flags); | 
|  | 2789 | if (IS_ERR(file)) | 
|  | 2790 | return PTR_ERR(file); | 
|  | 2791 |  | 
|  | 2792 | if (vma->vm_file) | 
|  | 2793 | fput(vma->vm_file); | 
|  | 2794 | vma->vm_file = file; | 
|  | 2795 | vma->vm_ops = &shmem_vm_ops; | 
| Hugh Dickins | bee4c36 | 2011-03-22 16:33:43 -0700 | [diff] [blame^] | 2796 | vma->vm_flags |= VM_CAN_NONLINEAR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2797 | return 0; | 
|  | 2798 | } |
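|  |  |  | 
|  |  | /* | 
|  |  | * Userspace view (illustrative): a shared anonymous mapping is what | 
|  |  | * reaches shmem_zero_setup(), e.g. | 
|  |  | * | 
|  |  | * p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, | 
|  |  | *          MAP_SHARED | MAP_ANONYMOUS, -1, 0); | 
|  |  | * | 
|  |  | * giving pages that are swap-backed and stay shared across fork(). | 
|  |  | */ | 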