blob: 30bed76ab2bb98b5f82db1da000a70372409d00b [file] [log] [blame]
Miklos Szeredi85c74fc2001-10-28 19:44:14 +00001/*
Miklos Szeredie56818b2004-12-12 11:45:24 +00002 FUSE: Filesystem in Userspace
Miklos Szeredi149f6072005-01-10 12:29:28 +00003 Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
Miklos Szeredi85c74fc2001-10-28 19:44:14 +00004
Miklos Szeredie56818b2004-12-12 11:45:24 +00005 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
Miklos Szeredi85c74fc2001-10-28 19:44:14 +00007*/
8
9#include "fuse_i.h"
10
Miklos Szeredia25d4c22004-11-23 22:32:16 +000011#include <linux/init.h>
Miklos Szeredi13ed4822004-11-20 11:12:21 +000012#include <linux/module.h>
Miklos Szeredi85c74fc2001-10-28 19:44:14 +000013#include <linux/poll.h>
Miklos Szeredie56818b2004-12-12 11:45:24 +000014#include <linux/uio.h>
Miklos Szeredi162bcbb2004-11-29 23:43:44 +000015#include <linux/miscdevice.h>
Miklos Szeredi83a07442004-11-30 18:25:20 +000016#include <linux/pagemap.h>
Miklos Szeredi85c74fc2001-10-28 19:44:14 +000017#include <linux/file.h>
Miklos Szeredi2a927272005-01-07 11:14:15 +000018#include <linux/slab.h>
Miklos Szeredi85c74fc2001-10-28 19:44:14 +000019
Miklos Szerediec12fad2005-06-08 10:26:34 +000020#ifdef MODULE_ALIAS_MISCDEV
21MODULE_ALIAS_MISCDEV(FUSE_MINOR);
22#endif
23
Miklos Szeredi43696432001-11-18 19:15:05 +000024static kmem_cache_t *fuse_req_cachep;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +000025
Miklos Szeredi3d60e762004-11-11 14:44:04 +000026static inline struct fuse_conn *fuse_get_conn(struct file *file)
27{
28 struct fuse_conn *fc;
29 spin_lock(&fuse_lock);
Miklos Szeredi13ed4822004-11-20 11:12:21 +000030 fc = file->private_data;
Miklos Szeredi0111f9d2005-04-22 12:04:55 +000031 if (fc && !fc->mounted)
Miklos Szeredi3d60e762004-11-11 14:44:04 +000032 fc = NULL;
33 spin_unlock(&fuse_lock);
34 return fc;
35}
36
Miklos Szeredi31d51192004-12-07 18:28:35 +000037static inline void fuse_request_init(struct fuse_req *req)
38{
39 memset(req, 0, sizeof(*req));
40 INIT_LIST_HEAD(&req->list);
41 init_waitqueue_head(&req->waitq);
Miklos Szeredie56818b2004-12-12 11:45:24 +000042 atomic_set(&req->count, 1);
Miklos Szeredi31d51192004-12-07 18:28:35 +000043}
44
Miklos Szeredi7eafcce2004-06-19 22:42:38 +000045struct fuse_req *fuse_request_alloc(void)
Miklos Szeredi43696432001-11-18 19:15:05 +000046{
Miklos Szeredi13ed4822004-11-20 11:12:21 +000047 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
Miklos Szeredi31d51192004-12-07 18:28:35 +000048 if (req)
49 fuse_request_init(req);
Miklos Szeredi43696432001-11-18 19:15:05 +000050 return req;
51}
52
Miklos Szeredi7eafcce2004-06-19 22:42:38 +000053void fuse_request_free(struct fuse_req *req)
Miklos Szeredi43696432001-11-18 19:15:05 +000054{
55 kmem_cache_free(fuse_req_cachep, req);
56}
57
/*
 * Block every signal except SIGKILL, saving the old mask in *oldset.
 * Three compatibility variants: 2.6 kernels can use sigprocmask()
 * directly; 2.4 kernels must manipulate current->blocked by hand,
 * with the lock name and recalc_sigpending() signature depending on
 * the kernel revision (HAVE_RECALC_SIGPENDING_TSK).
 */
#ifdef KERNEL_2_6
static inline void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

/* Restore the signal mask saved by block_sigs() */
static inline void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
#else
#ifdef HAVE_RECALC_SIGPENDING_TSK
static inline void block_sigs(sigset_t *oldset)
{
	spin_lock_irq(&current->sighand->siglock);
	*oldset = current->blocked;
	/* Don't unblock signals that were already blocked by the caller */
	siginitsetinv(&current->blocked, sigmask(SIGKILL) & ~oldset->sig[0]);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

static inline void restore_sigs(sigset_t *oldset)
{
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = *oldset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void block_sigs(sigset_t *oldset)
{
	spin_lock_irq(&current->sigmask_lock);
	*oldset = current->blocked;
	/* Don't unblock signals that were already blocked by the caller */
	siginitsetinv(&current->blocked, sigmask(SIGKILL) & ~oldset->sig[0]);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
}

static inline void restore_sigs(sigset_t *oldset)
{
	spin_lock_irq(&current->sigmask_lock);
	current->blocked = *oldset;
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
}
#endif
#endif
/*
 * Re-initialize a request for reuse, preserving only its
 * preallocated flag.  The caller must hold the only reference.
 */
void fuse_reset_request(struct fuse_req *req)
{
	int preallocated = req->preallocated;
	BUG_ON(atomic_read(&req->count) != 1);
	fuse_request_init(req);
	req->preallocated = preallocated;
}

/* Take an additional reference on the request */
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
128
/*
 * Take a preallocated request off fc->unused_list and initialize it.
 * The caller must guarantee the list is non-empty (the
 * outstanding_sem accounting ensures this).
 */
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
	struct fuse_req *req;

	spin_lock(&fuse_lock);
	BUG_ON(list_empty(&fc->unused_list));
	req = list_entry(fc->unused_list.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);
	fuse_request_init(req);
	req->preallocated = 1;
	/* Record the caller's credentials in the request header */
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
	return req;
}

/* This can return NULL, but only in case it's interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
	int intr;
	sigset_t oldset;

	/* Only SIGKILL may interrupt the wait for a free request slot */
	block_sigs(&oldset);
	intr = down_interruptible(&fc->outstanding_sem);
	restore_sigs(&oldset);
	return intr ? NULL : do_get_request(fc);
}
157
Miklos Szeredi7db35c02005-01-17 09:46:28 +0000158static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
Miklos Szeredi7eafcce2004-06-19 22:42:38 +0000159{
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000160 spin_lock(&fuse_lock);
161 if (req->preallocated)
Miklos Szeredid3dd2d52004-06-22 18:46:02 +0000162 list_add(&req->list, &fc->unused_list);
Miklos Szeredi31fa41c2005-03-04 17:28:51 +0000163 else
164 fuse_request_free(req);
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000165
Miklos Szeredifcf9f8d2005-09-08 14:28:54 +0000166 /* If we are in debt decrease that first */
167 if (fc->outstanding_debt)
168 fc->outstanding_debt--;
169 else
170 up(&fc->outstanding_sem);
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000171 spin_unlock(&fuse_lock);
Miklos Szeredi7eafcce2004-06-19 22:42:38 +0000172}
173
Miklos Szeredie56818b2004-12-12 11:45:24 +0000174void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
175{
176 if (atomic_dec_and_test(&req->count))
177 fuse_putback_request(fc, req);
178}
179
/*
 * Release the extra object references held by a backgrounded request
 * and take it off the fc->background list.  iput(NULL) is a no-op, so
 * the inode pointers need no check; fput(NULL) is not, hence the test.
 */
void fuse_release_background(struct fuse_req *req)
{
	iput(req->inode);
	iput(req->inode2);
	if (req->file)
		fput(req->file);
	spin_lock(&fuse_lock);
	list_del(&req->bg_entry);
	spin_unlock(&fuse_lock);
}
190
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file was
 * closed.  It decreases the reference count for the request.  In case
 * of a background request the reference to the stored objects are
 * released.  The requester thread is woken up (if still waiting), and
 * finally the request is either freed or put on the unused_list
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int putback;
	req->finished = 1;
	putback = atomic_dec_and_test(&req->count);
	spin_unlock(&fuse_lock);
	if (req->background) {
		/* sbput_sem keeps the superblock alive while the
		   background references are dropped */
		down_read(&fc->sbput_sem);
		if (fc->mounted)
			fuse_release_background(req);
		up_read(&fc->sbput_sem);
	}
	wake_up(&req->waitq);
	if (req->in.h.opcode == FUSE_INIT) {
		int i;

		if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
			fc->conn_error = 1;

		/* After INIT reply is received other requests can go
		   out.  So do (FUSE_MAX_OUTSTANDING - 1) number of
		   up()s on outstanding_sem.  The last up() is done in
		   fuse_putback_request() */
		for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
			up(&fc->outstanding_sem);
	} else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
		/* Special case for failed iget in CREATE: reuse this
		   request to send a FORGET for the orphaned nodeid */
		u64 nodeid = req->in.h.nodeid;
		__fuse_get_request(req);
		fuse_reset_request(req);
		fuse_send_forget(fc, req, nodeid, 1);
		putback = 0;
	}
	if (putback)
		fuse_putback_request(fc, req);
}
Miklos Szeredi7c35cf92004-01-14 16:56:49 +0000238
/*
 * Unfortunately request interruption not just solves the deadlock
 * problem, it causes problems too.  These stem from the fact, that an
 * interrupted request is continued to be processed in userspace,
 * while all the locks and object references (inode and file) held
 * during the operation are released.
 *
 * To release the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of "background" request is introduced.
 * An interrupted request is backgrounded if it has been already sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * inode(s) or file used in the request, and adding the request to
 * fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message is
 * always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
	/* Called with fuse_lock held */
	req->background = 1;
	list_add(&req->bg_entry, &fc->background);
	/* igrab() may return NULL if the inode is being freed; the
	   assignment keeps req->inode consistent in that case */
	if (req->inode)
		req->inode = igrab(req->inode);
	if (req->inode2)
		req->inode2 = igrab(req->inode2);
	if (req->file)
		get_file(req->file);
}
279
/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fuse_lock);
	/* Only SIGKILL can break out of the wait, since all other
	   signals are blocked around it */
	block_sigs(&oldset);
	wait_event_interruptible(req->waitq, req->finished);
	restore_sigs(&oldset);
	spin_lock(&fuse_lock);
	if (req->finished)
		return;

	/* Request was interrupted (SIGKILL) before a reply arrived */
	req->out.h.error = -EINTR;
	req->interrupted = 1;
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fuse_lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fuse_lock);
	}
	if (!req->sent && !list_empty(&req->list)) {
		/* Not yet read by userspace: just dequeue and drop
		   the extra reference taken by request_send() */
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (!req->finished && req->sent)
		/* Already in userspace: keep object references alive
		   until the reply arrives */
		background_request(fc, req);
}
311
312static unsigned len_args(unsigned numargs, struct fuse_arg *args)
313{
314 unsigned nbytes = 0;
315 unsigned i;
316
317 for (i = 0; i < numargs; i++)
318 nbytes += args[i].size;
319
320 return nbytes;
321}
322
/*
 * Assign a unique ID and total length to the request, account for it
 * against the outstanding limit, and put it on the pending list.
 * Called with fuse_lock held.
 */
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;
	req->in.h.unique = fc->reqctr;
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	if (!req->preallocated) {
		/* If request is not preallocated (either FORGET or
		   RELEASE), then still decrease outstanding_sem, so
		   user can't open infinite number of files while not
		   processing the RELEASE requests.  However for
		   efficiency do it without blocking, so if down()
		   would block, just increase the debt instead */
		if (down_trylock(&fc->outstanding_sem))
			fc->outstanding_debt++;
	}
	list_add_tail(&req->list, &fc->pending);
	wake_up(&fc->waitq);
}
345
/*
 * Send a request and wait for the answer.
 *
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fuse_lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		/* INIT handshake failed (see request_end) */
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fuse_lock);
}
367
Miklos Szeredi7db35c02005-01-17 09:46:28 +0000368static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
Miklos Szeredi43696432001-11-18 19:15:05 +0000369{
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000370 spin_lock(&fuse_lock);
Miklos Szeredi0111f9d2005-04-22 12:04:55 +0000371 if (fc->connected) {
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000372 queue_request(fc, req);
Miklos Szeredie79dc7e2004-06-23 05:57:30 +0000373 spin_unlock(&fuse_lock);
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000374 } else {
Miklos Szeredi7eafcce2004-06-19 22:42:38 +0000375 req->out.h.error = -ENOTCONN;
376 request_end(fc, req);
Miklos Szeredi7c35cf92004-01-14 16:56:49 +0000377 }
Miklos Szeredi7c35cf92004-01-14 16:56:49 +0000378}
379
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000380void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
381{
382 req->isreply = 0;
383 request_send_nowait(fc, req);
384}
385
386void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
387{
388 req->isreply = 1;
Miklos Szeredid17da462005-03-21 11:47:04 +0000389 spin_lock(&fuse_lock);
390 background_request(fc, req);
391 spin_unlock(&fuse_lock);
Miklos Szeredi0f62d722005-01-04 12:45:54 +0000392 request_send_nowait(fc, req);
393}
394
/*
 * Send the INIT handshake request advertising the kernel protocol
 * version.  Sent as a background request; the reply is handled in
 * request_end().
 */
void fuse_send_init(struct fuse_conn *fc)
{
	/* This is called from fuse_read_super() so there's guaranteed
	   to be a request available */
	struct fuse_req *req = do_get_request(fc);
	/* in/out share the same buffer stored inside the request */
	struct fuse_init_in_out *arg = &req->misc.init_in_out;
	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*arg);
	req->out.args[0].value = arg;
	request_send_background(fc, req);
}
412
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000413/*
414 * Lock the request. Up to the next unlock_request() there mustn't be
415 * anything that could cause a page-fault. If the request was already
416 * interrupted bail out.
417 */
Miklos Szeredie56818b2004-12-12 11:45:24 +0000418static inline int lock_request(struct fuse_req *req)
419{
420 int err = 0;
421 if (req) {
422 spin_lock(&fuse_lock);
423 if (req->interrupted)
424 err = -ENOENT;
425 else
426 req->locked = 1;
427 spin_unlock(&fuse_lock);
428 }
429 return err;
430}
431
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000432/*
433 * Unlock request. If it was interrupted during being locked, the
434 * requester thread is currently waiting for it to be unlocked, so
435 * wake it up.
436 */
Miklos Szeredie56818b2004-12-12 11:45:24 +0000437static inline void unlock_request(struct fuse_req *req)
438{
439 if (req) {
440 spin_lock(&fuse_lock);
441 req->locked = 0;
442 if (req->interrupted)
443 wake_up(&req->waitq);
444 spin_unlock(&fuse_lock);
445 }
446}
447
/* State kept while copying a request to/from userspace buffers */
struct fuse_copy_state {
	int write;		/* non-zero: copying kernel -> user */
	struct fuse_req *req;	/* request being copied (may be NULL) */
	const struct iovec *iov;	/* remaining userspace iovec segments */
	unsigned long nr_segs;	/* number of segments left in iov */
	unsigned long seglen;	/* bytes left in the current segment */
	unsigned long addr;	/* user address of the next byte to copy */
	struct page *pg;	/* currently pinned userspace page */
	void *mapaddr;		/* kernel mapping of pg (NULL if unmapped) */
	void *buf;		/* kernel address of next byte within pg */
	unsigned len;		/* bytes remaining in the mapped chunk */
};
460
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000461static void fuse_copy_init(struct fuse_copy_state *cs, int write,
462 struct fuse_req *req, const struct iovec *iov,
463 unsigned long nr_segs)
Miklos Szeredie56818b2004-12-12 11:45:24 +0000464{
Miklos Szeredie56818b2004-12-12 11:45:24 +0000465 memset(cs, 0, sizeof(*cs));
466 cs->write = write;
467 cs->req = req;
468 cs->iov = iov;
469 cs->nr_segs = nr_segs;
Miklos Szeredie56818b2004-12-12 11:45:24 +0000470}
471
/* Unmap and put previous page of userspace buffer */
static inline void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			/* The user page was written to: keep the data
			   cache coherent and mark the page dirty */
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
485
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	/* Drop the request lock and release the previous page before
	   get_user_pages(), which may fault */
	unlock_request(cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		/* Current segment exhausted: advance to the next one */
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov ++;
		cs->nr_segs --;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	/* Usable chunk ends at page boundary or segment end,
	   whichever comes first */
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->req);
}
520
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000521/* Do as much copy to/from userspace buffer as we can */
Miklos Szeredie56818b2004-12-12 11:45:24 +0000522static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
523 unsigned *size)
524{
525 unsigned ncpy = min(*size, cs->len);
526 if (val) {
527 if (cs->write)
528 memcpy(cs->buf, *val, ncpy);
529 else
530 memcpy(*val, cs->buf, ncpy);
531 *val += ncpy;
532 }
533 *size -= ncpy;
534 cs->len -= ncpy;
535 cs->buf += ncpy;
536 return ncpy;
537}
538
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
				 unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		/* Partial copy with zeroing: clear the whole page
		   first so the uncopied tail ends up zero-filled */
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			/* NULL page: just skip over 'count' bytes */
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
567
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	/* First page may start at a non-zero offset */
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
589
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000590/* Copy a single argument in the request to/from userspace buffer */
Miklos Szeredie56818b2004-12-12 11:45:24 +0000591static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
592{
593 while (size) {
594 int err;
595 if (!cs->len && (err = fuse_copy_fill(cs)))
596 return err;
597 fuse_copy_do(cs, &val, &size);
598 }
599 return 0;
600}
601
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000602/* Copy request arguments to/from userspace buffer */
Miklos Szeredie56818b2004-12-12 11:45:24 +0000603static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
604 unsigned argpages, struct fuse_arg *args,
605 int zeroing)
606{
607 int err = 0;
608 unsigned i;
609
610 for (i = 0; !err && i < numargs; i++) {
611 struct fuse_arg *arg = &args[i];
612 if (i == numargs - 1 && argpages)
613 err = fuse_copy_pages(cs, arg->size, zeroing);
614 else
615 err = fuse_copy_one(cs, arg->value, arg->size);
616 }
617 return err;
618}
619
/*
 * Wait until a request is available on the pending list.
 * Called and returns with fuse_lock held; the lock is dropped
 * around schedule().  Exits early on signal or unmount.
 */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	/* Exclusive wait: only one reader is woken per request */
	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->mounted && list_empty(&fc->pending)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fuse_lock);
		schedule();
		spin_lock(&fuse_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
638
#ifndef KERNEL_2_6
/* 2.4 kernels lack iov_length(); total byte count of an iovec array */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
	size_t total = 0;
	unsigned long i;

	for (i = 0; i < nr_segs; i++)
		total += iov[i].iov_len;
	return total;
}
#endif
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been interrupted or
 * there was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_conn *fc;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;

	spin_lock(&fuse_lock);
	fc = file->private_data;
	err = -EPERM;
	if (!fc)
		goto err_unlock;
	request_wait(fc);
	err = -ENODEV;
	if (!fc->mounted)
		goto err_unlock;
	/* request_wait() may have returned on a signal with the
	   pending list still empty */
	err = -ERESTARTSYS;
	if (list_empty(&fc->pending))
		goto err_unlock;

	req = list_entry(fc->pending.next, struct fuse_req, list);
	list_del_init(&req->list);
	spin_unlock(&fuse_lock);

	in = &req->in;
	reqsize = req->in.h.len;
	/* write=1: kernel -> user direction */
	fuse_copy_init(&cs, 1, req, iov, nr_segs);
	err = -EINVAL;
	/* The userspace buffer must hold the whole request */
	if (iov_length(iov, nr_segs) >= reqsize) {
		err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
		if (!err)
			err = fuse_copy_args(&cs, in->numargs, in->argpages,
					     (struct fuse_arg *) in->args, 0);
	}
	fuse_copy_finish(&cs);

	spin_lock(&fuse_lock);
	req->locked = 0;
	if (!err && req->interrupted)
		err = -ENOENT;
	if (err) {
		if (!req->interrupted)
			req->out.h.error = -EIO;
		/* request_end() drops fuse_lock */
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->sent = 1;
		list_add_tail(&req->list, &fc->processing);
		spin_unlock(&fuse_lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fuse_lock);
	return err;
}
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000721
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000722static ssize_t fuse_dev_read(struct file *file, char __user *buf,
723 size_t nbytes, loff_t *off)
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000724{
Miklos Szeredie56818b2004-12-12 11:45:24 +0000725 struct iovec iov;
726 iov.iov_len = nbytes;
727 iov.iov_base = buf;
728 return fuse_dev_readv(file, &iov, 1, off);
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000729}
730
Miklos Szeredi407e6a72005-03-25 12:19:43 +0000731/* Look up request on processing list by unique ID */
Miklos Szeredi8d4d1b82005-07-22 11:58:47 +0000732static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000733{
734 struct list_head *entry;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000735
736 list_for_each(entry, &fc->processing) {
Miklos Szeredie56818b2004-12-12 11:45:24 +0000737 struct fuse_req *req;
738 req = list_entry(entry, struct fuse_req, list);
739 if (req->in.h.unique == unique)
740 return req;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000741 }
Miklos Szeredie56818b2004-12-12 11:45:24 +0000742 return NULL;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000743}
744
/*
 * Copy the reply arguments from the userspace buffer into the
 * request.  'nbytes' is the total size of the write, which must match
 * the expected reply size exactly, except that the last argument may
 * be shorter if out->argvar is set (variable-length reply).
 */
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	/* An error reply carries no arguments, only the header */
	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		/* Shrink the last (variable-length) argument to what
		   userspace actually supplied */
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
767
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end().
 *
 * Returns the number of bytes consumed on success, or a negative
 * errno: -ENODEV (connection gone), -EINVAL (malformed reply or
 * unknown unique ID), -ENOENT (request was interrupted meanwhile).
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -ENODEV;

	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
	/* A reply must at least contain a complete header */
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	/* Sanity-check the header: unique ID present, error code within
	   the valid errno range (0 or -1..-999), and the declared length
	   matching what was actually written */
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fuse_lock);
	req = request_find(fc, oh.unique);
	err = -EINVAL;
	if (!req)
		goto err_unlock;

	/* Claim the request: off the processing list, no other writer
	   can match this unique ID now */
	list_del_init(&req->list);
	if (req->interrupted) {
		/* Requester gave up waiting; discard the reply */
		request_end(fc, req);
		fuse_copy_finish(&cs);
		return -ENOENT;
	}
	req->out.h = oh;
	/* Mark the request locked so it is not freed while we copy
	   argument data without holding fuse_lock */
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fuse_lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fuse_lock);
	req->locked = 0;
	/* Re-check for interruption that happened during the unlocked
	   copy phase */
	if (!err) {
		if (req->interrupted)
			err = -ENOENT;
	} else if (!req->interrupted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fuse_lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
Miklos Szerediaa63b6b2004-12-03 13:24:35 +0000836
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000837static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000838 size_t nbytes, loff_t *off)
839{
Miklos Szeredie56818b2004-12-12 11:45:24 +0000840 struct iovec iov;
841 iov.iov_len = nbytes;
842 iov.iov_base = (char __user *) buf;
843 return fuse_dev_writev(file, &iov, 1, off);
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000844}
845
Miklos Szeredi83a07442004-11-30 18:25:20 +0000846static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000847{
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000848 struct fuse_conn *fc = fuse_get_conn(file);
Miklos Szeredi83a07442004-11-30 18:25:20 +0000849 unsigned mask = POLLOUT | POLLWRNORM;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000850
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000851 if (!fc)
852 return -ENODEV;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000853
854 poll_wait(file, &fc->waitq, wait);
855
856 spin_lock(&fuse_lock);
857 if (!list_empty(&fc->pending))
858 mask |= POLLIN | POLLRDNORM;
859 spin_unlock(&fuse_lock);
860
861 return mask;
862}
863
/*
 * Abort all requests on the given list (pending or processing).
 * Each request is completed with -ECONNABORTED.
 *
 * NOTE(review): must be entered with fuse_lock held (see
 * fuse_dev_release); the re-lock at the bottom of the loop implies
 * request_end() drops fuse_lock — confirm against request_end().
 * Returns with fuse_lock held either way.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		list_del_init(&req->list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		/* request_end released fuse_lock; retake it before
		   examining the list again */
		spin_lock(&fuse_lock);
	}
}
876
877static int fuse_dev_release(struct inode *inode, struct file *file)
878{
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000879 struct fuse_conn *fc;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000880
881 spin_lock(&fuse_lock);
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000882 fc = file->private_data;
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000883 if (fc) {
Miklos Szeredi0111f9d2005-04-22 12:04:55 +0000884 fc->connected = 0;
Miklos Szeredi3d60e762004-11-11 14:44:04 +0000885 end_requests(fc, &fc->pending);
886 end_requests(fc, &fc->processing);
887 fuse_release_conn(fc);
888 }
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000889 spin_unlock(&fuse_lock);
890 return 0;
891}
892
/* File operations for /dev/fuse: read fetches requests destined for
   userspace, write delivers replies, poll reports request
   availability.  Seeking is meaningless on the device. */
struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
};
903
/* Misc character device registration for /dev/fuse (fixed minor) */
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
909
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000910int __init fuse_dev_init(void)
911{
Miklos Szeredi3f0005f2005-01-04 19:24:31 +0000912 int err = -ENOMEM;
Miklos Szeredi2a807ec2005-01-10 11:50:10 +0000913 fuse_req_cachep = kmem_cache_create("fuse_request",
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000914 sizeof(struct fuse_req),
915 0, 0, NULL, NULL);
916 if (!fuse_req_cachep)
Miklos Szeredi3f0005f2005-01-04 19:24:31 +0000917 goto out;
Miklos Szerediaa63b6b2004-12-03 13:24:35 +0000918
Miklos Szeredi162bcbb2004-11-29 23:43:44 +0000919 err = misc_register(&fuse_miscdevice);
920 if (err)
921 goto out_cache_clean;
922
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000923 return 0;
Miklos Szeredi162bcbb2004-11-29 23:43:44 +0000924
925 out_cache_clean:
926 kmem_cache_destroy(fuse_req_cachep);
Miklos Szeredi13ed4822004-11-20 11:12:21 +0000927 out:
928 return err;
Miklos Szeredi85c74fc2001-10-28 19:44:14 +0000929}
930
/* Tear down the device part of FUSE: deregister /dev/fuse first so no
   new opens can race with the cache destruction, then destroy the
   request slab cache. */
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}