blob: 4413f5e7b13361f65aa8fd1333d56273d96623d4 [file] [log] [blame]
Miklos Szeredi334f4852005-09-09 13:10:27 -07001/*
2 FUSE: Filesystem in Userspace
Miklos Szeredi1729a162008-11-26 12:03:54 +01003 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
Miklos Szeredi334f4852005-09-09 13:10:27 -07004
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
Miklos Szeredidd3bb142010-05-25 15:06:06 +020019#include <linux/pipe_fs_i.h>
Miklos Szeredice534fb2010-05-25 15:06:07 +020020#include <linux/swap.h>
21#include <linux/splice.h>
Miklos Szeredi334f4852005-09-09 13:10:27 -070022
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
24
Christoph Lametere18b8902006-12-06 20:33:20 -080025static struct kmem_cache *fuse_req_cachep;
Miklos Szeredi334f4852005-09-09 13:10:27 -070026
Miklos Szeredi8bfc0162006-01-16 22:14:28 -080027static struct fuse_conn *fuse_get_conn(struct file *file)
Miklos Szeredi334f4852005-09-09 13:10:27 -070028{
Miklos Szeredi0720b312006-04-10 22:54:55 -070029 /*
30 * Lockless access is OK, because file->private data is set
31 * once during mount and is valid until the file is released.
32 */
33 return file->private_data;
Miklos Szeredi334f4852005-09-09 13:10:27 -070034}
35
/*
 * Reset a (possibly recycled) request to a pristine state with a
 * single reference owned by the caller.
 */
static void fuse_request_init(struct fuse_req *req)
{
	/* Zero everything first; the inits below rebuild embedded state */
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
44
45struct fuse_req *fuse_request_alloc(void)
46{
Christoph Lametere94b1762006-12-06 20:33:17 -080047 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
Miklos Szeredi334f4852005-09-09 13:10:27 -070048 if (req)
49 fuse_request_init(req);
50 return req;
51}
Tejun Heo08cbf542009-04-14 10:54:53 +090052EXPORT_SYMBOL_GPL(fuse_request_alloc);
Miklos Szeredi334f4852005-09-09 13:10:27 -070053
Miklos Szeredi3be5a522008-04-30 00:54:41 -070054struct fuse_req *fuse_request_alloc_nofs(void)
55{
56 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
57 if (req)
58 fuse_request_init(req);
59 return req;
60}
61
/* Return a request's memory to the slab cache. */
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
66
/*
 * Block all signals except SIGKILL for the current task, saving the
 * previous mask in *oldset for restore_sigs().
 */
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
74
/* Restore the signal mask saved by block_sigs(). */
static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
79
/* Take an additional reference on the request. */
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}
84
/*
 * Drop one reference without freeing.  Must be called with > 1
 * refcount; use fuse_put_request() to drop the final reference.
 */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
91
/* Record the requester's credentials in the request header. */
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}
98
/*
 * Allocate a request for sending to userspace and fill in the
 * caller's credentials.  Waits (interruptible only by SIGKILL) while
 * the connection is blocked due to too many background requests.
 *
 * Returns ERR_PTR(-EINTR) on fatal signal, ERR_PTR(-ENOTCONN) if the
 * connection is gone, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	/* Counted as waiting from here on; undone on every error path */
	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	/* req->waiting tells fuse_put_request() to drop num_waiting */
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
Miklos Szeredi334f4852005-09-09 13:10:27 -0700132
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		/* Sleep until the reserved request looks free ... */
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		/* ... then re-check under the lock to actually claim it */
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			/* Pin the file; released in put_reserved_req() */
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
158
/*
 * Put stolen request back into fuse_file->reserved_req and wake up
 * anybody waiting in get_reserved_req().
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	/* Re-initialize so the next user gets a clean request */
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	/* Drop the file reference taken in get_reserved_req() */
	fput(file);
}
175
/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	/* Uninterruptible: this request must not fail with -EINTR */
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
203
/*
 * Drop a reference to the request; on the last reference undo the
 * num_waiting accounting and either return the request to its
 * fuse_file reservation or free it.
 */
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
Miklos Szeredi7128ec22006-02-04 23:27:40 -0800217
Miklos Szeredid12def12008-02-06 01:38:39 -0800218static unsigned len_args(unsigned numargs, struct fuse_arg *args)
219{
220 unsigned nbytes = 0;
221 unsigned i;
222
223 for (i = 0; i < numargs; i++)
224 nbytes += args[i].size;
225
226 return nbytes;
227}
228
229static u64 fuse_get_unique(struct fuse_conn *fc)
230{
231 fc->reqctr++;
232 /* zero is special */
233 if (fc->reqctr == 0)
234 fc->reqctr = 1;
235
236 return fc->reqctr;
237}
238
/*
 * Put a request on the pending list and notify readers of /dev/fuse.
 * Caller holds fc->lock.
 */
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	/* Total length of the message: header plus all input arguments */
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	/* Background requests were not counted at allocation time */
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
253
/*
 * Move queued background requests to the pending list, up to the
 * max_background limit.  Caller holds fc->lock.
 */
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		queue_request(fc, req);
	}
}
266
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		/* Dropping below max_background unblocks new requests */
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		/* Dropping below the threshold uncongests the bdi */
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	/* Wake the requester and run the completion callback unlocked */
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}
305
/*
 * Wait (interruptibly) for the request to reach FUSE_REQ_FINISHED,
 * dropping fc->lock around the sleep.  Returns immediately if a
 * signal is already pending.
 */
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
318
/*
 * Queue an INTERRUPT for an in-flight request and notify readers.
 * Caller holds fc->lock.
 */
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
325
/*
 * Wait for a request to complete, in up to three phases:
 *
 *  1. Fully interruptible wait (unless the filesystem has disabled
 *     interrupts); any signal causes an INTERRUPT to be queued.
 *  2. Wait interruptible only by fatal signals (unless forced); while
 *     still PENDING the request may be dequeued and failed with -EINTR.
 *  3. Uninterruptible wait: the request is in userspace or forced.
 *
 * Called with fc->lock held; may drop and reacquire it.
 */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			/* Drop the extra reference taken by the sender */
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}
390
/*
 * Send a request and wait for the answer.  On return req->out.h.error
 * holds the result (-ENOTCONN / -ECONNREFUSED if it was never queued).
 */
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
Miklos Szeredi334f4852005-09-09 13:10:27 -0700410
/*
 * Queue a background request without waiting for the answer.
 * Caller holds fc->lock.
 */
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	/* Hitting the limit blocks further foreground requests */
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	/* Hitting the threshold marks the bdi congested */
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}
426
/*
 * Queue a background request, or fail it with -ENOTCONN if the
 * connection is gone.
 */
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		/* request_end() drops fc->lock */
		request_end(fc, req);
	}
}
438
/* Send a request for which no reply from userspace is expected. */
void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	fuse_request_send_nowait(fc, req);
}
444
/*
 * Send a request in the background; the reply is handled via the
 * request's 'end' callback instead of a waiting thread.
 */
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
Miklos Szeredi334f4852005-09-09 13:10:27 -0700451
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}
463
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 *
 * Returns 0 on success, -ENOENT if the request was aborted.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}
482
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
498
/* State for copying request data between the kernel and userspace,
 * either through an iovec or through pipe buffers (splice). */
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;			/* non-zero: copying TO the buffer */
	struct fuse_req *req;
	const struct iovec *iov;	/* iovec mode: remaining segments */
	struct pipe_buffer *pipebufs;	/* splice mode: next pipe buffer */
	struct pipe_buffer *currbuf;	/* splice mode: buffer in use */
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;		/* remaining iovec/pipe segments */
	unsigned long seglen;		/* bytes left in current iovec seg */
	unsigned long addr;		/* user address within current seg */
	struct page *pg;		/* currently mapped user page */
	void *mapaddr;			/* kmap address of current page */
	void *buf;			/* copy position within the page */
	unsigned len;			/* bytes left in the mapped page */
	unsigned move_pages:1;		/* try page stealing on full pages */
};
516
/* Initialize copy state for an iovec-based transfer. */
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
527
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		/* Splice mode: release the pipe buffer mapping */
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap_atomic(cs->mapaddr, KM_USER0);
			/* Record how much of the fresh page was filled */
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		/* Iovec mode: unmap and release the pinned user page */
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
552
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	/* Unlock so the mapping work below may fault without deadlock */
	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			/* Reading from the pipe: map the next buffer */
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			/* Writing to the pipe: append a fresh page */
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap_atomic(page, KM_USER0);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		/* Iovec mode: advance to the next segment if needed */
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		/* Pin and map one page of the user buffer */
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}
622
623/* Do as much copy to/from userspace buffer as we can */
Miklos Szeredi8bfc0162006-01-16 22:14:28 -0800624static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
Miklos Szeredi334f4852005-09-09 13:10:27 -0700625{
626 unsigned ncpy = min(*size, cs->len);
627 if (val) {
628 if (cs->write)
629 memcpy(cs->buf, *val, ncpy);
630 else
631 memcpy(*val, cs->buf, ncpy);
632 *val += ncpy;
633 }
634 *size -= ncpy;
635 cs->len -= ncpy;
636 cs->buf += ncpy;
637 return ncpy;
638}
639
/*
 * Check that a stolen pipe page is safe to insert into the page
 * cache: unmapped, unreferenced except by us, and carrying no page
 * flags other than the benign set below.  Returns 1 if the page is
 * unsuitable.
 */
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
658
/*
 * Try to replace the request page *pagep with the page backing the
 * current pipe buffer (zero-copy splice).  Returns 0 on success, a
 * negative error, or 1 to tell the caller to fall back to an ordinary
 * copy (the buffer is then left mapped for fuse_copy_do()).
 */
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	/* Consume this pipe buffer regardless of the outcome */
	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	/* Only whole pages can be moved */
	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	/* Swap the old page for the stolen one in the page cache */
	remove_from_page_cache(oldpage);
	page_cache_release(oldpage);

	err = add_to_page_cache_locked(newpage, mapping, index, GFP_KERNEL);
	if (err) {
		printk(KERN_WARNING "fuse_try_move_page: failed to add page");
		goto out_fallback_unlock;
	}
	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	/* Leave the buffer mapped so the caller can copy normally */
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}
758
/*
 * Reference a request page directly from a pipe buffer (zero-copy on
 * the read/splice path) instead of copying its contents.
 */
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	/* No free pipe slot left */
	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	/* The pipe buffer holds its own reference to the page */
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}
782
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	/* Zero the tail of a partially filled page if requested */
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		if (cs->write && cs->pipebufs && page) {
			/* Splice read: hand the page to the pipe directly */
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				/* Splice write: try stealing a whole page */
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			/* No page: just skip over the bytes */
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
825
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	/* First page may start at a non-zero offset */
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
849
850/* Copy a single argument in the request to/from userspace buffer */
851static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
852{
853 while (size) {
Miklos Szeredi1729a162008-11-26 12:03:54 +0100854 if (!cs->len) {
855 int err = fuse_copy_fill(cs);
856 if (err)
857 return err;
858 }
Miklos Szeredi334f4852005-09-09 13:10:27 -0700859 fuse_copy_do(cs, &val, &size);
860 }
861 return 0;
862}
863
864/* Copy request arguments to/from userspace buffer */
865static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
866 unsigned argpages, struct fuse_arg *args,
867 int zeroing)
868{
869 int err = 0;
870 unsigned i;
871
872 for (i = 0; !err && i < numargs; i++) {
873 struct fuse_arg *arg = &args[i];
874 if (i == numargs - 1 && argpages)
875 err = fuse_copy_pages(cs, arg->size, zeroing);
876 else
877 err = fuse_copy_one(cs, arg->value, arg->size);
878 }
879 return err;
880}
881
Miklos Szeredia4d27e72006-06-25 05:48:54 -0700882static int request_pending(struct fuse_conn *fc)
883{
884 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
885}
886
/*
 * Wait until a request is available on the pending list.
 *
 * Called with fc->lock held; the lock is dropped around schedule() and
 * reacquired before returning (hence the sparse annotations below).
 * Returns with a request pending, the connection gone, or a signal
 * delivered — the caller re-checks all three.
 */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	/* Exclusive wait: only one reader is woken per queued request */
	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		/* Drop the lock while sleeping so writers can make progress */
		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
907
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(&fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	/* Take the request off the interrupts list and give the interrupt
	   its own unique ID so its reply can be told apart from the reply
	   to the original request */
	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	/* Tell userspace which in-flight request is being interrupted */
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	/* The reader's buffer must hold the whole message */
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
945
/*
 * Read a single request into the userspace filesystem's buffer. This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer. If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end(). Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	/* Non-blocking read with nothing queued: bail out immediately */
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	/* request_wait() returned because of a signal, not a request */
	if (!request_pending(fc))
		goto err_unlock;

	/* Interrupts take priority over ordinary pending requests */
	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		/* fuse_read_interrupt() releases fc->lock */
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	/* Copy runs without fc->lock; the request sits on fc->io meanwhile */
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	/* The request may have been aborted while we copied unlocked */
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		/* No reply expected (e.g. FORGET): finish right away */
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		/* An interrupt may have arrived while the request was queued */
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
1032
Miklos Szeredic3021622010-05-25 15:06:07 +02001033static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1034 unsigned long nr_segs, loff_t pos)
1035{
1036 struct fuse_copy_state cs;
1037 struct file *file = iocb->ki_filp;
1038 struct fuse_conn *fc = fuse_get_conn(file);
1039 if (!fc)
1040 return -EPERM;
1041
1042 fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1043
1044 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1045}
1046
/* NOTE(review): unconditionally returns 1, i.e. declines to let the page
   be stolen — presumably because these pages belong to fuse requests;
   per the pipe_buf_operations convention 0 would mean "steal OK" (confirm
   against include/linux/pipe_fs_i.h) */
static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}
1052
/* Ops for pipe buffers produced by fuse_dev_splice_read(): generic
   helpers throughout, except stealing is declined (see
   fuse_dev_pipe_buf_steal above) and buffers never merge */
static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
1062
/*
 * splice_read entry point: read one request into a temporary array of
 * pipe buffers (filled by fuse_dev_do_read() via cs.pipebufs), then
 * transfer those buffers into the pipe's ring under pipe_lock().
 */
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	/* Staging array, sized to the pipe's capacity */
	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	/* All staged buffers must fit in the ring, or none go in */
	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	/* Hand the staged buffers over to the pipe ring */
	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	/* Drop references on any pages that were not handed to the pipe
	   (error paths leave page_nr < cs.nr_segs) */
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
1136
Tejun Heo95668a62008-11-26 12:03:55 +01001137static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1138 struct fuse_copy_state *cs)
1139{
1140 struct fuse_notify_poll_wakeup_out outarg;
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001141 int err = -EINVAL;
Tejun Heo95668a62008-11-26 12:03:55 +01001142
1143 if (size != sizeof(outarg))
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001144 goto err;
Tejun Heo95668a62008-11-26 12:03:55 +01001145
1146 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1147 if (err)
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001148 goto err;
Tejun Heo95668a62008-11-26 12:03:55 +01001149
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001150 fuse_copy_finish(cs);
Tejun Heo95668a62008-11-26 12:03:55 +01001151 return fuse_notify_poll_wakeup(fc, &outarg);
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001152
1153err:
1154 fuse_copy_finish(cs);
1155 return err;
Tejun Heo95668a62008-11-26 12:03:55 +01001156}
1157
John Muir3b463ae2009-05-31 11:13:57 -04001158static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1159 struct fuse_copy_state *cs)
1160{
1161 struct fuse_notify_inval_inode_out outarg;
1162 int err = -EINVAL;
1163
1164 if (size != sizeof(outarg))
1165 goto err;
1166
1167 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1168 if (err)
1169 goto err;
1170 fuse_copy_finish(cs);
1171
1172 down_read(&fc->killsb);
1173 err = -ENOENT;
Miklos Szeredib21dda42010-02-05 12:08:31 +01001174 if (fc->sb) {
1175 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1176 outarg.off, outarg.len);
1177 }
John Muir3b463ae2009-05-31 11:13:57 -04001178 up_read(&fc->killsb);
1179 return err;
1180
1181err:
1182 fuse_copy_finish(cs);
1183 return err;
1184}
1185
1186static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1187 struct fuse_copy_state *cs)
1188{
1189 struct fuse_notify_inval_entry_out outarg;
Fang Wenqib2d82ee2009-12-30 18:37:13 +08001190 int err = -ENOMEM;
1191 char *buf;
John Muir3b463ae2009-05-31 11:13:57 -04001192 struct qstr name;
1193
Fang Wenqib2d82ee2009-12-30 18:37:13 +08001194 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1195 if (!buf)
1196 goto err;
1197
1198 err = -EINVAL;
John Muir3b463ae2009-05-31 11:13:57 -04001199 if (size < sizeof(outarg))
1200 goto err;
1201
1202 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1203 if (err)
1204 goto err;
1205
1206 err = -ENAMETOOLONG;
1207 if (outarg.namelen > FUSE_NAME_MAX)
1208 goto err;
1209
1210 name.name = buf;
1211 name.len = outarg.namelen;
1212 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1213 if (err)
1214 goto err;
1215 fuse_copy_finish(cs);
1216 buf[outarg.namelen] = 0;
1217 name.hash = full_name_hash(name.name, name.len);
1218
1219 down_read(&fc->killsb);
1220 err = -ENOENT;
Miklos Szeredib21dda42010-02-05 12:08:31 +01001221 if (fc->sb)
1222 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
John Muir3b463ae2009-05-31 11:13:57 -04001223 up_read(&fc->killsb);
Fang Wenqib2d82ee2009-12-30 18:37:13 +08001224 kfree(buf);
John Muir3b463ae2009-05-31 11:13:57 -04001225 return err;
1226
1227err:
Fang Wenqib2d82ee2009-12-30 18:37:13 +08001228 kfree(buf);
John Muir3b463ae2009-05-31 11:13:57 -04001229 fuse_copy_finish(cs);
1230 return err;
1231}
1232
Tejun Heo85993962008-11-26 12:03:55 +01001233static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1234 unsigned int size, struct fuse_copy_state *cs)
1235{
1236 switch (code) {
Tejun Heo95668a62008-11-26 12:03:55 +01001237 case FUSE_NOTIFY_POLL:
1238 return fuse_notify_poll(fc, size, cs);
1239
John Muir3b463ae2009-05-31 11:13:57 -04001240 case FUSE_NOTIFY_INVAL_INODE:
1241 return fuse_notify_inval_inode(fc, size, cs);
1242
1243 case FUSE_NOTIFY_INVAL_ENTRY:
1244 return fuse_notify_inval_entry(fc, size, cs);
1245
Tejun Heo85993962008-11-26 12:03:55 +01001246 default:
Miklos Szeredif6d47a12009-01-26 15:00:59 +01001247 fuse_copy_finish(cs);
Tejun Heo85993962008-11-26 12:03:55 +01001248 return -EINVAL;
1249 }
1250}
1251
Miklos Szeredi334f4852005-09-09 13:10:27 -07001252/* Look up request on processing list by unique ID */
1253static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1254{
1255 struct list_head *entry;
1256
1257 list_for_each(entry, &fc->processing) {
1258 struct fuse_req *req;
1259 req = list_entry(entry, struct fuse_req, list);
Miklos Szeredia4d27e72006-06-25 05:48:54 -07001260 if (req->in.h.unique == unique || req->intr_unique == unique)
Miklos Szeredi334f4852005-09-09 13:10:27 -07001261 return req;
1262 }
1263 return NULL;
1264}
1265
1266static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1267 unsigned nbytes)
1268{
1269 unsigned reqsize = sizeof(struct fuse_out_header);
1270
1271 if (out->h.error)
1272 return nbytes != reqsize ? -EINVAL : 0;
1273
1274 reqsize += len_args(out->numargs, out->args);
1275
1276 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1277 return -EINVAL;
1278 else if (reqsize > nbytes) {
1279 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1280 unsigned diffsize = reqsize - nbytes;
1281 if (diffsize > lastarg->size)
1282 return -EINVAL;
1283 lastarg->size -= diffsize;
1284 }
1285 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1286 out->page_zeroing);
1287}
1288
/*
 * Write a single reply to a request. First the header is copied from
 * the write buffer. The request is then searched on the processing
 * list by the unique ID found in the header. If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	/* The header's self-declared length must match the write size */
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	/* Error must be a plausible negative errno (or zero) */
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		/* Drop the lock while discarding the remaining data */
		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		/* Interrupt replies consist of the header alone */
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs->req = req;
	/* Page moving is only allowed if the request opted in */
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	/* Copy runs without fc->lock; req->locked keeps it pinned */
	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		/* Aborted while we copied: tell the writer, not the waiter */
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
1388
Miklos Szeredidd3bb142010-05-25 15:06:06 +02001389static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1390 unsigned long nr_segs, loff_t pos)
1391{
1392 struct fuse_copy_state cs;
1393 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1394 if (!fc)
1395 return -EPERM;
1396
Miklos Szeredic3021622010-05-25 15:06:07 +02001397 fuse_copy_init(&cs, fc, 0, iov, nr_segs);
Miklos Szeredidd3bb142010-05-25 15:06:06 +02001398
1399 return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1400}
1401
/*
 * splice_write entry point: detach (or share) exactly @len bytes of
 * pipe buffers into a private array, then feed them to
 * fuse_dev_do_write() as the reply payload.
 */
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	/* Make sure the pipe holds at least len bytes before consuming */
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		/* Both guaranteed by the length pre-check above */
		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			/* Whole buffer consumed: take over its reference */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			/* Partial: grab an extra reference and split */
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, fc, 0, NULL, nbuf);
	cs.pipebufs = bufs;
	cs.pipe = pipe;

	/* SPLICE_F_MOVE lets pages be moved into the page cache directly */
	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	/* Drop our references on all consumed buffers */
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}
1479
Miklos Szeredi334f4852005-09-09 13:10:27 -07001480static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1481{
Miklos Szeredi334f4852005-09-09 13:10:27 -07001482 unsigned mask = POLLOUT | POLLWRNORM;
Miklos Szeredi7025d9a2006-04-10 22:54:50 -07001483 struct fuse_conn *fc = fuse_get_conn(file);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001484 if (!fc)
Miklos Szeredi7025d9a2006-04-10 22:54:50 -07001485 return POLLERR;
Miklos Szeredi334f4852005-09-09 13:10:27 -07001486
1487 poll_wait(file, &fc->waitq, wait);
1488
Miklos Szeredid7133112006-04-10 22:54:55 -07001489 spin_lock(&fc->lock);
Miklos Szeredi7025d9a2006-04-10 22:54:50 -07001490 if (!fc->connected)
1491 mask = POLLERR;
Miklos Szeredia4d27e72006-06-25 05:48:54 -07001492 else if (request_pending(fc))
Miklos Szeredi7025d9a2006-04-10 22:54:50 -07001493 mask |= POLLIN | POLLRDNORM;
Miklos Szeredid7133112006-04-10 22:54:55 -07001494 spin_unlock(&fc->lock);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001495
1496 return mask;
1497}
1498
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		/* request_end() drops fc->lock; reacquire before the next
		   list_empty() check */
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
1516
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up. This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* Hold a reference so the request survives until the
			   end callback has run with fc->lock dropped */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			/* Wait for any copier still holding the request */
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			fuse_put_request(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
1553
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem. The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by req->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		/* io list first — see the ordering note above */
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		/* Wake readers and anyone throttled on the blocked queue */
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
Miklos Szeredi69a53bf2006-01-16 22:14:41 -08001589
Tejun Heo08cbf542009-04-14 10:54:53 +09001590int fuse_dev_release(struct inode *inode, struct file *file)
Miklos Szeredi334f4852005-09-09 13:10:27 -07001591{
Miklos Szeredi0720b312006-04-10 22:54:55 -07001592 struct fuse_conn *fc = fuse_get_conn(file);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001593 if (fc) {
Miklos Szeredid7133112006-04-10 22:54:55 -07001594 spin_lock(&fc->lock);
Miklos Szeredi1e9a4ed2005-09-09 13:10:31 -07001595 fc->connected = 0;
Miklos Szeredi334f4852005-09-09 13:10:27 -07001596 end_requests(fc, &fc->pending);
1597 end_requests(fc, &fc->processing);
Miklos Szeredid7133112006-04-10 22:54:55 -07001598 spin_unlock(&fc->lock);
Miklos Szeredibafa9652006-06-25 05:48:51 -07001599 fuse_conn_put(fc);
Jeff Dike385a17b2006-04-10 22:54:52 -07001600 }
Miklos Szeredif543f252006-01-16 22:14:35 -08001601
Miklos Szeredi334f4852005-09-09 13:10:27 -07001602 return 0;
1603}
Tejun Heo08cbf542009-04-14 10:54:53 +09001604EXPORT_SYMBOL_GPL(fuse_dev_release);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001605
Jeff Dike385a17b2006-04-10 22:54:52 -07001606static int fuse_dev_fasync(int fd, struct file *file, int on)
1607{
1608 struct fuse_conn *fc = fuse_get_conn(file);
1609 if (!fc)
Miklos Szeredia87046d2006-04-10 22:54:56 -07001610 return -EPERM;
Jeff Dike385a17b2006-04-10 22:54:52 -07001611
1612 /* No locking - fasync_helper does its own locking */
1613 return fasync_helper(fd, file, on, &fc->fasync);
1614}
1615
/* File operations for the fuse device (/dev/fuse) */
const struct file_operations fuse_dev_operations = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = fuse_dev_read,
	.splice_read = fuse_dev_splice_read,
	.write = do_sync_write,
	.aio_write = fuse_dev_write,
	.splice_write = fuse_dev_splice_write,
	.poll = fuse_dev_poll,
	.release = fuse_dev_release,
	.fasync = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001630
/* Misc-device registration record for /dev/fuse (minor FUSE_MINOR) */
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name = "fuse",
	.fops = &fuse_dev_operations,
};
1636
1637int __init fuse_dev_init(void)
1638{
1639 int err = -ENOMEM;
1640 fuse_req_cachep = kmem_cache_create("fuse_request",
1641 sizeof(struct fuse_req),
Paul Mundt20c2df82007-07-20 10:11:58 +09001642 0, 0, NULL);
Miklos Szeredi334f4852005-09-09 13:10:27 -07001643 if (!fuse_req_cachep)
1644 goto out;
1645
1646 err = misc_register(&fuse_miscdevice);
1647 if (err)
1648 goto out_cache_clean;
1649
1650 return 0;
1651
1652 out_cache_clean:
1653 kmem_cache_destroy(fuse_req_cachep);
1654 out:
1655 return err;
1656}
1657
/* Module teardown: unregister /dev/fuse and free the request cache */
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}