/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * Reset the request, so that it can be reused
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req
 */
void fuse_reset_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int err;

	block_sigs(&oldset);
	err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	if (err)
		return ERR_PTR(-EINTR);

	req = fuse_request_alloc();
	if (!req)
		return ERR_PTR(-ENOMEM);

	atomic_inc(&fc->num_waiting);
	fuse_request_init(req);
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

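/*
 * A minimal sketch of the usual request lifecycle as seen from a
 * caller (illustrative only; the opcode below is a placeholder, see
 * the real callers in dir.c and file.c):
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_GETATTR;	// fill in header and args
 *	request_send(fc, req);			// sleeps until the reply
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);		// drop the initial reference
 */
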
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		atomic_dec(&fc->num_waiting);
		fuse_request_free(req);
	}
}

void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fc->lock);
	list_del(&req->bg_entry);
	if (fc->num_background == FUSE_MAX_BACKGROUND) {
		fc->blocked = 0;
		wake_up_all(&fc->blocked_waitq);
	}
	fc->num_background--;
	spin_unlock(&fc->lock);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, else the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.
 * This is because fuse_reset_request() may be called after the
 * request is finished and it must be the sole possessor.  If the
 * request is interrupted and put in the background, it will return
 * with an error and hence never be reset and reused.
 *
 * Called with fc->lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (!req->background) {
		spin_unlock(&fc->lock);
		wake_up(&req->waitq);
		fuse_put_request(fc, req);
	} else {
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
		req->end = NULL;
		spin_unlock(&fc->lock);
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(fc, req);
		up_read(&fc->sbput_sem);
		if (end)
			end(fc, req);
		else
			fuse_put_request(fc, req);
	}
}

/*
 * Unfortunately request interruption not only solves the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * Releasing the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introducing additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of a "background" request is
 * introduced.  An interrupted request is backgrounded if it has
 * already been sent to userspace.  Backgrounding involves getting an
 * extra reference to the inode(s) or file used in the request, and
 * adding the request to the fc->background list.  When a reply is
 * received for a background request, the object references are
 * released, and the request is removed from the list.  If the
 * filesystem is unmounted while there are still background requests,
 * the list is walked and references are released as if a reply was
 * received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	fc->num_background++;
	if (fc->num_background == FUSE_MAX_BACKGROUND)
		fc->blocked = 1;
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}

/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fc->lock);
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	restore_sigs(&oldset);
	spin_lock(&fc->lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	if (!req->interrupted) {
		req->out.h.error = -EINTR;
		req->interrupted = 1;
	}
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT)
		background_request(fc, req);
}

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

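/*
 * queue_request() below stamps the request with a non-zero unique ID
 * and the total message length, then parks it on fc->pending.
 * Readers sleeping in request_wait() are woken directly; daemons
 * using SIGIO-driven I/O are notified through the fasync list.
 */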
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	background_request(fc, req);
	if (fc->connected) {
		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page fault.  If the request was already
 * interrupted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->interrupted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock the request.  If it was interrupted while locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->interrupted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

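/*
 * The copy state below tracks a position in the userspace iovec and
 * the currently mapped page at the same time, so that the fuse_copy_*
 * helpers can stream a request to or from an arbitrarily fragmented
 * user buffer one page at a time.
 */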
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another pageful of the userspace buffer, map it into kernel
 * address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), or the request has been
 * interrupted, or there was an error during the copying, then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list, and set the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    list_empty(&fc->pending))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since its data may be arbitrarily large */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

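/*
 * For orientation only: the userspace side of this read/write
 * protocol (normally hidden inside libfuse) boils down to a loop of
 * roughly the following shape.  This is a hypothetical sketch, not a
 * libfuse excerpt; fuse_fd, buf and the reply variables are
 * placeholders.
 *
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, bufsize);
 *		if (n < 0 && errno == ENODEV)
 *			break;				// unmounted
 *		... process the request in buf ...
 *		write(fuse_fd, reply, replylen);	// one whole reply
 *	}
 */
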
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique)
			return req;
	}
	return NULL;
}

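/*
 * copy_out_args() enforces the reply size rules: an error reply must
 * consist of the header alone, and a data reply must match the
 * expected argument sizes exactly, except that a variable-size last
 * argument (out->argvar) may legitimately be shorter and is truncated
 * to fit the number of bytes actually written.
 */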
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched for on the
 * processing list by the unique ID found in the header.  If found,
 * remove it from the list and copy the rest of the buffer to the
 * request.  The request is finished by calling request_end().
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	if (req->interrupted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}

static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (!list_empty(&fc->pending))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

/*
 * Abort requests under I/O
 *
 * The requests are set to interrupted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->interrupted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list, is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->interrupted flag being true for these
 * requests.  For this reason requests on the io list must be aborted
 * first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}

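/*
 * Final close of the device file: mark the connection as dead, fail
 * all requests still on the pending and processing lists, and drop
 * the device file's reference on the connection object.
 */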
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		kobject_put(&fc->kobj);
	}

	return 0;
}

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}

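/*
 * Note that a reply must be delivered in a single write() or writev()
 * call whose length matches the oh.len field of its header:
 * fuse_dev_writev() looks up the request by the header's unique ID
 * and consumes the remainder of that one buffer as the reply body.
 */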
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}