/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

/*
 * We use this list to defer freeing fdtables that have vmalloc()ed
 * sets/arrays: vfree() must not be called from the RCU (softirq) callback,
 * so such tables are handed to a workqueue instead.  Keeping a per-cpu list
 * here, rather than embedding a work_struct in every fdtable, avoids a
 * 64-byte (i386) increase in that per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

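/*
 * Illustration of the size threshold below (assuming 4 KiB pages and 8-byte
 * struct file pointers): a 256-slot fd array needs 2 KiB and comes from
 * kmalloc(), while a 1024-slot array needs 8 KiB and falls back to vmalloc().
 */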
static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}

static inline void free_fdarr(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}

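/*
 * The two fd bitmaps are allocated as one block of 2 * max_fds / BITS_PER_BYTE
 * bytes (see alloc_fdtable()), so they were kmalloc()ed exactly when
 * max_fds <= PAGE_SIZE * BITS_PER_BYTE / 2 -- the threshold tested below.
 */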
static inline void free_fdset(struct fdtable *fdt)
{
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;
		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}

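/*
 * RCU callback that releases an fdtable once no reader can still see it.
 * Three cases: the default table embedded in the files_struct is freed along
 * with the files_struct itself; small kmalloc()ed tables are freed here
 * directly; vmalloc()ed tables are pushed onto the per-cpu defer list,
 * because vfree() may not be called from softirq context.
 */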
void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Copy the existing fd array and fd sets from ofdt into nfdt and zero the
 * newly added slots.  Called with files->file_lock held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);
	if (ofdt->max_fds == 0)
		return;

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

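	/*
	 * Both bitmaps hold max_fds bits apiece, so the byte counts computed
	 * below cover open_fds and close_on_exec alike.
	 */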
	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	if (nr > NR_OPEN)
		nr = NR_OPEN;
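	/*
	 * Worked example (assuming 8-byte struct file pointers, i.e. 128
	 * pointers per 1024 bytes): a request for fd 300 gives
	 * 300 / 128 = 2, roundup_pow_of_two(3) = 4, 4 * 128 = 512 -- a
	 * 512-slot table whose fd array fills exactly one 4 KiB page.
	 */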

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
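	/*
	 * A single allocation backs both bitmaps: open_fds takes the first
	 * nr / BITS_PER_BYTE bytes and close_on_exec the next, with
	 * L1_CACHE_BYTES as a floor for very small tables.
	 */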
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function allocates a new fdtable, together with an fd array and fdsets
 * large enough for the given fd.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (nr >= NR_OPEN)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}
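
/*
 * Sketch of the usual calling pattern (modelled on callers such as
 * get_unused_fd(); the repeat/out labels and fd search belong to the caller,
 * not to this file):
 *
 *	spin_lock(&files->file_lock);
 * repeat:
 *	fd = find_next_zero_bit(...);
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;
 *	if (error)
 *		goto repeat;	// table grew, lock was dropped: retry
 */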

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}