/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
        struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

        put_cred(f->f_cred);
        kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
        percpu_counter_dec(&nr_files);
        file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
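
/*
 * For reference, a sketch (not part of this file) of how the handler above
 * is typically wired up from the fs table in kernel/sysctl.c; the exact
 * ctl_table fields may differ between kernel versions:
 *
 *      {
 *              .procname       = "file-nr",
 *              .data           = &files_stat,
 *              .maxlen         = 3*sizeof(int),
 *              .mode           = 0444,
 *              .proc_handler   = &proc_nr_files,
 *      },
 *
 * Reading /proc/sys/fs/file-nr then reports the nr_files, nr_free_files
 * and max_files fields of the files_stat structure above.
 */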

/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
        const struct cred *cred = current_cred();
        static int old_max;
        struct file *f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_long_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_cred = get_cred(cred);
        spin_lock_init(&f->f_lock);
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                                        get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

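/*
 * Illustrative sketch only (not part of the original file; the helper name
 * is hypothetical).  It shows the responsibility described above: a caller
 * creating a file opened for write must already hold write access on the
 * mount, because init_file() merely clones that access with
 * mnt_clone_write(); the clone is dropped again at __fput() time.
 */
#if 0
static struct file *example_new_writable_file(struct vfsmount *mnt,
                                              struct dentry *dentry,
                                              const struct file_operations *fop)
{
        struct file *file;

        if (mnt_want_write(mnt))        /* guarantees the mount is writable */
                return NULL;
        file = alloc_file(mnt, dget(dentry), FMODE_READ | FMODE_WRITE, fop);
        mnt_drop_write(mnt);            /* the file now holds its own clone */
        if (!file)
                dput(dentry);           /* undo the reference we took */
        return file;
}
#endif
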
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the preferred
 * interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
                fmode_t mode, const struct file_operations *fop)
{
        struct file *file;

        file = get_empty_filp();
        if (!file)
                return NULL;

        init_file(file, mnt, dentry, mode, fop);
        return file;
}
EXPORT_SYMBOL(alloc_file);

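/*
 * Usage note (sketch, hypothetical names): init_file() takes its own
 * reference on @mnt via mntget(), but simply stores @dentry, so the caller
 * hands over a dentry reference it already holds.  __fput() later drops
 * both.  A minimal read-only open of an internal file might look like:
 */
#if 0
static struct file *example_open_internal(struct vfsmount *example_mnt,
                                          struct dentry *example_dentry,
                                          const struct file_operations *example_fops)
{
        struct file *file;

        file = alloc_file(example_mnt, dget(example_dentry), FMODE_READ,
                          example_fops);
        if (!file)
                dput(example_dentry);   /* nothing stored it, undo the dget */
        return file;                    /* a later fput() drops dentry and mnt */
}
#endif
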
/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to be initialized
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly.  Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface.  It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier.  All users of
 * this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
           fmode_t mode, const struct file_operations *fop)
{
        int error = 0;
        file->f_path.dentry = dentry;
        file->f_path.mnt = mntget(mnt);
        file->f_mapping = dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;

        /*
         * These mounts don't really matter in practice
         * for r/o bind mounts.  They aren't userspace-
         * visible.  We do this for consistency, and so
         * that we can do debugging checks at __fput()
         */
        if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
                file_take_write(file);
                error = mnt_clone_write(mnt);
                WARN_ON(error);
        }
        return error;
}
EXPORT_SYMBOL(init_file);

void fput(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
        struct vfsmount *mnt = file->f_path.mnt;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;

        put_write_access(inode);

        if (special_file(inode->i_mode))
                return;
        if (file_check_writeable(file) != 0)
                return;
        mnt_drop_write(mnt);
        file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (unlikely(file->f_flags & FASYNC)) {
                if (file->f_op && file->f_op->fasync)
                        file->f_op->fasync(-1, file, 0);
        }
        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        put_pid(file->f_owner.pid);
        file_kill(file);
        if (file->f_mode & FMODE_WRITE)
                drop_file_write_access(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file_free(file);
        dput(dentry);
        mntput(mnt);
}

struct file *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_long_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);

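/*
 * Illustrative sketch (not part of the original file; names are
 * hypothetical): the canonical fget()/fput() pairing used when code needs
 * to hold on to a file referred to by a user-supplied descriptor.
 */
#if 0
static int example_use_fd(unsigned int fd)
{
        struct file *file = fget(fd);
        int ret = -EBADF;

        if (!file)
                return ret;
        /* ... safe to use file here; we hold our own reference ... */
        ret = 0;
        fput(file);             /* may run __fput() if this was the last ref */
        return ret;
}
#endif
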
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file.  That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning of the fd table between an
 * fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely((atomic_read(&files->count) == 1))) {
                file = fcheck_files(files, fd);
        } else {
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_long_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}

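/*
 * Illustrative sketch (hypothetical names): how system-call code typically
 * uses the lightweight pair.  fput_light() only performs a real fput() when
 * fget_light() actually took a reference.
 */
#if 0
static int example_peek_fd(unsigned int fd)
{
        int fput_needed;
        struct file *file = fget_light(fd, &fput_needed);
        int ret = -EBADF;

        if (file) {
                /* ... use file; a ref (or sole ownership of the table) is held ... */
                ret = 0;
                fput_light(file, fput_needed);
        }
        return ret;
}
#endif
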

void put_filp(struct file *file)
{
        if (atomic_long_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}

void file_move(struct file *file, struct list_head *list)
{
        if (!list)
                return;
        file_list_lock();
        list_move(&file->f_u.fu_list, list);
        file_list_unlock();
}
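
/*
 * For reference: in this era of the VFS, __dentry_open() in fs/open.c puts
 * each newly opened file on its superblock's list with
 * file_move(f, &inode->i_sb->s_files); that list is what
 * fs_may_remount_ro() and mark_files_ro() below walk.
 */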

void file_kill(struct file *file)
{
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}

int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        file_list_lock();
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
                struct inode *inode = file->f_path.dentry->d_inode;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        goto too_bad;

                /* Writeable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        goto too_bad;
        }
        file_list_unlock();
        return 1; /* Tis' cool bro. */
too_bad:
        file_list_unlock();
        return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
        struct file *f;

retry:
        file_list_lock();
        list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
                struct vfsmount *mnt;
                if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
                        continue;
                if (!file_count(f))
                        continue;
                if (!(f->f_mode & FMODE_WRITE))
                        continue;
                f->f_mode &= ~FMODE_WRITE;
                if (file_check_writeable(f) != 0)
                        continue;
                file_release_write(f);
                mnt = mntget(f->f_path.mnt);
                file_list_unlock();
                /*
                 * This can sleep, so we can't hold
                 * the file_list_lock() spinlock.
                 */
                mnt_drop_write(mnt);
                mntput(mnt);
                goto retry;
        }
        file_list_unlock();
}
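
/*
 * Illustrative sketch (not part of this file) of how the two routines above
 * fit together on a read-only remount; the actual code lives in
 * do_remount_sb() in fs/super.c and may differ slightly by version:
 *
 *      if (remounting read-only) {
 *              if (force)
 *                      mark_files_ro(sb);
 *              else if (!fs_may_remount_ro(sb))
 *                      return -EBUSY;
 *      }
 */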

void __init files_init(unsigned long mempages)
{
        int n;

        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

        /*
         * One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */
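        /*
         * Worked example (illustrative): with 4 KiB pages and 1 GiB of RAM,
         * mempages is about 262144, so n = 262144 * (4096 / 1024) / 10,
         * i.e. roughly 104857 files.
         */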

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}