/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

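/* Allocate a request from the slab cache; returns NULL if allocation fails */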
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

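/* Return a request to the slab cache */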
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

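/* Block all signals except SIGKILL, saving the old mask in oldset */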
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset a request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
	int preallocated = req->preallocated;
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
	req->preallocated = preallocated;
}

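/* Take an extra reference on the request */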
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

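/*
 * Take a preallocated request off the unused list and fill in the
 * caller's credentials.  The caller has already downed
 * fc->outstanding_sem, which guarantees that the unused list is
 * non-empty.
 */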
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fc->lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fc->lock);
	fuse_request_init(req);
	req->preallocated = 1;
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

/* This can return NULL, but only if it was interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	int intr;
	sigset_t oldset;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = down_interruptible(&fc->outstanding_sem);
	restore_sigs(&oldset);
	if (intr) {
		atomic_dec(&fc->num_waiting);
		return NULL;
	}
	return do_get_request(fc);
}

/* Must be called with fc->lock held */
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req->preallocated) {
		atomic_dec(&fc->num_waiting);
		list_add(&req->list, &fc->unused_list);
	} else
		fuse_request_free(req);

	/* If we are in debt decrease that first */
	if (fc->outstanding_debt)
		fc->outstanding_debt--;
	else
		up(&fc->outstanding_sem);
}

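/*
 * Drop a reference to the request.  When the last reference is
 * dropped, the request is put back on the unused list or freed.
 */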
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		spin_lock(&fc->lock);
		fuse_putback_request(fc, req);
		spin_unlock(&fc->lock);
	}
}

static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count))
		fuse_putback_request(fc, req);
}

void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fc->lock);
	list_del(&req->bg_entry);
	spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the
 * request is finished and it must be the sole possessor.  If the
 * request is interrupted and put in the background, it will return
 * with an error and hence never be reset and reused.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (!req->background) {
		wake_up(&req->waitq);
		fuse_put_request_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
		req->end = NULL;
		spin_unlock(&fc->lock);
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(fc, req);
		up_read(&fc->sbput_sem);
		if (end)
			end(fc, req);
		else
			fuse_put_request(fc, req);
	}
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * to introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fc->lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fc->lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}

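/* Total length of the numargs arguments */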
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

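/*
 * Assign the next unique ID, fill in the total message length and add
 * the request to the pending list.  Called with fc->lock held.
 */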
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	if (!req->preallocated) {
		/* If the request is not preallocated (either FORGET
		   or RELEASE), then still decrease outstanding_sem,
		   so the user can't open an infinite number of files
		   while not processing the RELEASE requests.  However
		   for efficiency do it without blocking, so if down()
		   would block, just increase the debt instead */
		if (down_trylock(&fc->outstanding_sem))
			fc->outstanding_debt++;
	}
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

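/* Queue a request without waiting for the answer */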
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

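/* Send a request for which no reply is expected (e.g. FORGET) */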
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	background_request(fc, req);
	spin_unlock(&fc->lock);
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock the request.  If it was interrupted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

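/*
 * State held while copying between a request and the userspace buffer
 * described by an iovec, one mapped page at a time.
 */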
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of the userspace buffer, map it to kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET) or the request has been
 * interrupted or there was an error during the copying then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list, and set its state to 'sent'.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    list_empty(&fc->pending))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

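/*
 * Copy the reply arguments from the userspace buffer, after checking
 * that the size given in the reply header is consistent with the
 * declared argument sizes.
 */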
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched for on the
 * processing list by the unique ID found in the header.  If found,
 * remove it from the list and copy the rest of the buffer to the
 * request.  The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}

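/*
 * The device is always writable; it becomes readable when a request
 * is waiting on the pending list.
 */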
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

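/*
 * Called on the final close of the device file: mark the connection
 * as disconnected and finish all outstanding requests.
 */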
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		kobject_put(&fc->kobj);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}