blob: 5da5ef616b85c8c34f4e676c1d5db4e3ed63d00b [file] [log] [blame]
Mike Marshall5db11c22015-07-17 10:38:12 -04001/*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * Changes by Acxiom Corporation to add protocol version to kernel
5 * communication, Copyright Acxiom Corporation, 2005.
6 *
7 * See COPYING in top-level directory.
8 */
9
10#include "protocol.h"
Mike Marshall575e9462015-12-04 12:56:14 -050011#include "orangefs-kernel.h"
12#include "orangefs-dev-proto.h"
13#include "orangefs-bufmap.h"
Mike Marshall5db11c22015-07-17 10:38:12 -040014
15#include <linux/debugfs.h>
16#include <linux/slab.h>
17
18/* this file implements the /dev/pvfs2-req device node */
19
20static int open_access_count;
21
/*
 * Scream at the operator when a second process tries to open the
 * request device: only one client-core may hold /dev/pvfs2-req open
 * at a time (enforced via open_access_count in orangefs_devreq_open).
 */
#define DUMP_DEVICE_ERROR()                                                   \
do {                                                                          \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error: You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once. Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                       \
	gossip_err("instances of a program using this device\ncurrently "     \
		   "running. (You must verify this!)\n");                     \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                   \
		   ORANGEFS_REQDEVICE_NAME);                                  \
	gossip_err(" open_access_count = %d\n", open_access_count);           \
	gossip_err("*****************************************************\n");\
} while (0)
36
/*
 * Map a 64-bit op tag onto a bucket of htable_ops_in_progress.
 *
 * do_div() divides the 64-bit value in place and returns the
 * remainder, so this evaluates tag % table_size in a way that is
 * safe on 32-bit architectures (no 64-bit '%' in kernel code).
 */
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}
41
Yi Liu8bb8aef2015-11-24 15:12:14 -050042static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
Mike Marshall5db11c22015-07-17 10:38:12 -040043{
44 int index = hash_func(op->tag, hash_table_size);
45
46 spin_lock(&htable_ops_in_progress_lock);
47 list_add_tail(&op->list, &htable_ops_in_progress[index]);
48 spin_unlock(&htable_ops_in_progress_lock);
49}
50
Yi Liu8bb8aef2015-11-24 15:12:14 -050051static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
Mike Marshall5db11c22015-07-17 10:38:12 -040052{
Yi Liu8bb8aef2015-11-24 15:12:14 -050053 struct orangefs_kernel_op_s *op, *next;
Mike Marshall5db11c22015-07-17 10:38:12 -040054 int index;
55
56 index = hash_func(tag, hash_table_size);
57
58 spin_lock(&htable_ops_in_progress_lock);
59 list_for_each_entry_safe(op,
60 next,
61 &htable_ops_in_progress[index],
62 list) {
63 if (op->tag == tag) {
64 list_del(&op->list);
65 spin_unlock(&htable_ops_in_progress_lock);
66 return op;
67 }
68 }
69
70 spin_unlock(&htable_ops_in_progress_lock);
71 return NULL;
72}
73
Yi Liu8bb8aef2015-11-24 15:12:14 -050074static int orangefs_devreq_open(struct inode *inode, struct file *file)
Mike Marshall5db11c22015-07-17 10:38:12 -040075{
76 int ret = -EINVAL;
77
78 if (!(file->f_flags & O_NONBLOCK)) {
Mike Marshall97f10022015-12-11 16:45:03 -050079 gossip_err("%s: device cannot be opened in blocking mode\n",
80 __func__);
Mike Marshall5db11c22015-07-17 10:38:12 -040081 goto out;
82 }
83 ret = -EACCES;
Mike Marshall97f10022015-12-11 16:45:03 -050084 gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
Mike Marshall5db11c22015-07-17 10:38:12 -040085 mutex_lock(&devreq_mutex);
86
87 if (open_access_count == 0) {
88 ret = generic_file_open(inode, file);
89 if (ret == 0)
90 open_access_count++;
91 } else {
92 DUMP_DEVICE_ERROR();
93 }
94 mutex_unlock(&devreq_mutex);
95
96out:
97
98 gossip_debug(GOSSIP_DEV_DEBUG,
99 "pvfs2-client-core: open device complete (ret = %d)\n",
100 ret);
101 return ret;
102}
103
/*
 * Function for read() callers into the device.
 *
 * Hands the next serviceable upcall to the client-core.  The buffer
 * pushed to userspace is laid out as:
 *   __s32 proto_ver | __s32 magic | __u64 tag | struct orangefs_upcall_s
 * and the read must be exactly MAX_DEV_REQ_UPSIZE bytes long.
 *
 * Returns MAX_DEV_REQ_UPSIZE on success, -EAGAIN when no op is ready,
 * -EINVAL on misuse (blocking fd or wrong size), -EFAULT if the copy
 * out to userspace fails.
 */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op = NULL;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("%s: blocking read from client-core.\n",
			   __func__);
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/*
			 * Skip ops whose filesystem we don't know about unless
			 * it is being mounted.
			 */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type ==
					ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type ==
					ORANGEFS_VFS_OP_GETATTR)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
		     llu(cur_op->tag), get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);

	/*
	 * Note the ordering: request-list lock is dropped before the op
	 * is filed into the in-progress hash table, but the op's own lock
	 * is held across both steps.
	 */
	list_del(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);

	/* Push the upcall out. */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	/* The client only asks to read one size buffer. */
	return MAX_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * list. If client has crashed, the op will be purged later when the
	 * device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	set_op_state_waiting(cur_op);
	orangefs_devreq_remove_op(cur_op->tag);
	list_add(&cur_op->list, &orangefs_request_list);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}
246
/*
 * Function for writev() callers into the device.
 *
 * Userspace should have written:
 *  - __u32 version
 *  - __u32 magic
 *  - __u64 tag
 *  - struct orangefs_downcall_s
 *  - trailer buffer (in the case of READDIR operations)
 *
 * The tag is used to find the matching in-progress op; the downcall
 * is copied into it and its waiters are woken.  For FILE_IO ops this
 * thread additionally blocks until the vfs side reports io_completed,
 * so the shared buffers cannot be reused while still in flight.
 */
static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
				      struct iov_iter *iter)
{
	ssize_t ret;
	struct orangefs_kernel_op_s *op = NULL;
	struct {
		__u32 version;
		__u32 magic;
		__u64 tag;
	} head;
	/* ret starts out as the full byte count; error paths overwrite it */
	int total = ret = iov_iter_count(iter);
	int n;
	int downcall_size = sizeof(struct orangefs_downcall_s);
	int head_size = sizeof(head);

	gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
		     __func__,
		     total,
		     ret);

	/* A well-formed write carries at least head + downcall. */
	if (total < MAX_DEV_REQ_DOWNSIZE) {
		gossip_err("%s: total:%d: must be at least:%u:\n",
			   __func__,
			   total,
			   (unsigned int) MAX_DEV_REQ_DOWNSIZE);
		ret = -EFAULT;
		goto out;
	}

	n = copy_from_iter(&head, head_size, iter);
	if (n < head_size) {
		gossip_err("%s: failed to copy head.\n", __func__);
		ret = -EFAULT;
		goto out;
	}

	if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
		gossip_err("%s: userspace claims version"
			   "%d, minimum version required: %d.\n",
			   __func__,
			   head.version,
			   ORANGEFS_MINIMUM_USERSPACE_VERSION);
		ret = -EPROTO;
		goto out;
	}

	if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		ret = -EPROTO;
		goto out;
	}

	op = orangefs_devreq_remove_op(head.tag);
	if (!op) {
		gossip_err("WARNING: No one's waiting for tag %llu\n",
			   llu(head.tag));
		/*
		 * NOTE(review): ret still holds the positive byte count
		 * here, so an unknown tag is reported to userspace as a
		 * successful write — confirm this is intentional.
		 */
		goto out;
	}

	get_op(op); /* increase ref count. */

	n = copy_from_iter(&op->downcall, downcall_size, iter);
	if (n != downcall_size) {
		gossip_err("%s: failed to copy downcall.\n", __func__);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* A failed op carries no payload; skip trailer validation. */
	if (op->downcall.status)
		goto wakeup;

	/*
	 * We've successfully peeled off the head and the downcall.
	 * Something has gone awry if total doesn't equal the
	 * sum of head_size, downcall_size and trailer_size.
	 */
	if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
		gossip_err("%s: funky write, head_size:%d"
			   ": downcall_size:%d: trailer_size:%lld"
			   ": total size:%d:\n",
			   __func__,
			   head_size,
			   downcall_size,
			   op->downcall.trailer_size,
			   total);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* Only READDIR operations should have trailers. */
	if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size != 0)) {
		gossip_err("%s: %x operation with trailer.",
			   __func__,
			   op->downcall.type);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	/* READDIR operations should always have trailers. */
	if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
	    (op->downcall.trailer_size == 0)) {
		gossip_err("%s: %x operation with no trailer.",
			   __func__,
			   op->downcall.type);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

	if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
		goto wakeup;

	/* READDIR: pull the variable-size trailer into a vmalloc'd buffer. */
	op->downcall.trailer_buf =
		vmalloc(op->downcall.trailer_size);
	if (op->downcall.trailer_buf == NULL) {
		gossip_err("%s: failed trailer vmalloc.\n",
			   __func__);
		put_op(op);
		ret = -ENOMEM;
		goto out;
	}
	memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
	n = copy_from_iter(op->downcall.trailer_buf,
			   op->downcall.trailer_size,
			   iter);
	if (n != op->downcall.trailer_size) {
		gossip_err("%s: failed to copy trailer.\n", __func__);
		vfree(op->downcall.trailer_buf);
		put_op(op);
		ret = -EFAULT;
		goto out;
	}

wakeup:

	/*
	 * If this operation is an I/O operation we need to wait
	 * for all data to be copied before we can return to avoid
	 * buffer corruption and races that can pull the buffers
	 * out from under us.
	 *
	 * Essentially we're synchronizing with other parts of the
	 * vfs implicitly by not allowing the user space
	 * application reading/writing this device to return until
	 * the buffers are done being used.
	 */
	if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
		int timed_out = 0;
		DEFINE_WAIT(wait_entry);

		/*
		 * tell the vfs op waiting on a waitqueue
		 * that this op is done
		 */
		spin_lock(&op->lock);
		set_op_state_serviced(op);
		spin_unlock(&op->lock);

		wake_up_interruptible(&op->waitq);

		/*
		 * Sleep until the vfs side sets io_completed, a signal
		 * arrives, or op_timeout_secs elapses.
		 */
		while (1) {
			spin_lock(&op->lock);
			prepare_to_wait_exclusive(
				&op->io_completion_waitq,
				&wait_entry,
				TASK_INTERRUPTIBLE);
			if (op->io_completed) {
				spin_unlock(&op->lock);
				break;
			}
			spin_unlock(&op->lock);

			if (!signal_pending(current)) {
				int timeout =
				    MSECS_TO_JIFFIES(1000 *
						     op_timeout_secs);
				if (!schedule_timeout(timeout)) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "%s: timed out.\n",
						     __func__);
					timed_out = 1;
					break;
				}
				continue;
			}

			gossip_debug(GOSSIP_DEV_DEBUG,
				     "%s: signal on I/O wait, aborting\n",
				     __func__);
			break;
		}

		spin_lock(&op->lock);
		finish_wait(&op->io_completion_waitq, &wait_entry);
		spin_unlock(&op->lock);

		/* NOTE: for I/O operations we handle releasing the op
		 * object except in the case of timeout. the reason we
		 * can't free the op in timeout cases is that the op
		 * service logic in the vfs retries operations using
		 * the same op ptr, thus it can't be freed.
		 */
		if (!timed_out)
			op_release(op);
	} else {
		/*
		 * tell the vfs op waiting on a waitqueue that
		 * this op is done
		 */
		spin_lock(&op->lock);
		set_op_state_serviced(op);
		spin_unlock(&op->lock);
		/*
		 * for every other operation (i.e. non-I/O), we need to
		 * wake up the callers for downcall completion
		 * notification
		 */
		wake_up_interruptible(&op->waitq);
	}
out:
	return ret;
}
483
484/* Returns whether any FS are still pending remounted */
485static int mark_all_pending_mounts(void)
486{
487 int unmounted = 1;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500488 struct orangefs_sb_info_s *orangefs_sb = NULL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400489
Yi Liu8bb8aef2015-11-24 15:12:14 -0500490 spin_lock(&orangefs_superblocks_lock);
491 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400492 /* All of these file system require a remount */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500493 orangefs_sb->mount_pending = 1;
Mike Marshall5db11c22015-07-17 10:38:12 -0400494 unmounted = 0;
495 }
Yi Liu8bb8aef2015-11-24 15:12:14 -0500496 spin_unlock(&orangefs_superblocks_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400497 return unmounted;
498}
499
500/*
501 * Determine if a given file system needs to be remounted or not
502 * Returns -1 on error
503 * 0 if already mounted
504 * 1 if needs remount
505 */
506int fs_mount_pending(__s32 fsid)
507{
508 int mount_pending = -1;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500509 struct orangefs_sb_info_s *orangefs_sb = NULL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400510
Yi Liu8bb8aef2015-11-24 15:12:14 -0500511 spin_lock(&orangefs_superblocks_lock);
512 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
513 if (orangefs_sb->fs_id == fsid) {
514 mount_pending = orangefs_sb->mount_pending;
Mike Marshall5db11c22015-07-17 10:38:12 -0400515 break;
516 }
517 }
Yi Liu8bb8aef2015-11-24 15:12:14 -0500518 spin_unlock(&orangefs_superblocks_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400519 return mount_pending;
520}
521
/*
 * NOTE: gets called when the last reference to this device is dropped.
 * Using the open_access_count variable, we enforce a reference count
 * on this file so that it can be opened by only one process at a time.
 * the devreq_mutex is used to make sure all i/o has completed
 * before we call orangefs_bufmap_finalize, and similar such tricky
 * situations
 */
static int orangefs_devreq_release(struct inode *inode, struct file *file)
{
	int unmounted = 0;

	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s:pvfs2-client-core: exiting, closing device\n",
		     __func__);

	mutex_lock(&devreq_mutex);
	/* tear down the shared-memory bufmap only if it was ever set up */
	if (orangefs_get_bufmap_init())
		orangefs_bufmap_finalize();

	open_access_count--;

	/* a new client-core will have to remount every filesystem */
	unmounted = mark_all_pending_mounts();
	gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
		     (unmounted ? "UNMOUNTED" : "MOUNTED"));
	mutex_unlock(&devreq_mutex);

	/*
	 * Walk through the list of ops in the request list, mark them
	 * as purged and wake them up.
	 */
	purge_waiting_ops();
	/*
	 * Walk through the hash table of in progress operations; mark
	 * them as purged and wake them up
	 */
	purge_inprogress_ops();
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "pvfs2-client-core: device close complete\n");
	return 0;
}
563
564int is_daemon_in_service(void)
565{
566 int in_service;
567
568 /*
569 * What this function does is checks if client-core is alive
570 * based on the access count we maintain on the device.
571 */
572 mutex_lock(&devreq_mutex);
573 in_service = open_access_count == 1 ? 0 : -EIO;
574 mutex_unlock(&devreq_mutex);
575 return in_service;
576}
577
578static inline long check_ioctl_command(unsigned int command)
579{
580 /* Check for valid ioctl codes */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500581 if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400582 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
583 command,
584 _IOC_TYPE(command),
Yi Liu8bb8aef2015-11-24 15:12:14 -0500585 ORANGEFS_DEV_MAGIC);
Mike Marshall5db11c22015-07-17 10:38:12 -0400586 return -EINVAL;
587 }
588 /* and valid ioctl commands */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500589 if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400590 gossip_err("Invalid ioctl command number [%d >= %d]\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500591 _IOC_NR(command), ORANGEFS_DEV_MAXNR);
Mike Marshall5db11c22015-07-17 10:38:12 -0400592 return -ENOIOCTLCMD;
593 }
594 return 0;
595}
596
/*
 * Service a validated ioctl on the request device.  The commands let
 * the client-core discover protocol constants (magic, up/down sizes),
 * install the shared I/O buffer mapping, trigger a remount of all
 * orangefs volumes, and exchange debug-mask settings and strings.
 */
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	/* hand protocol constants back to the client-core */
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	/* install the shared buffer mapping (only once per open) */
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		if (orangefs_get_bufmap_init()) {
			return -EINVAL;
		} else {
			return ret ?
			       -EIO :
			       orangefs_bufmap_initialize(&user_desc);
		}
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
			     __func__);

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid ops between
		 * mounts to fail)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount in progress\n",
			     __func__);
		list_for_each(tmp, &orangefs_superblocks) {
			orangefs_sb =
				list_entry(tmp,
					   struct orangefs_sb_info_s,
					   list);
			if (orangefs_sb && (orangefs_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "%s: Remounting SB %p\n",
					     __func__,
					     orangefs_sb);

				ret = orangefs_remount(orangefs_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     orangefs_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: priority remount complete\n",
			     __func__);
		mutex_unlock(&request_mutex);
		return ret;

	/* let the client-core detect it is talking to the upstream kmod */
	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				   &upstream_kmod,
				   sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	/* receive the client-core's two-part debug mask */
	case ORANGEFS_DEV_CLIENT_MASK:
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	/* receive the client-core's debug keyword table */
	case ORANGEFS_DEV_CLIENT_STRING:
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     ORANGEFS_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been received.\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: no debug help string \n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		orangefs_client_debug_init();

		help_string_initialized++;

		return ret;

	/* update either the kernel or the client debug mask */
	case ORANGEFS_DEV_DEBUG:
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for ORANGEFS_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("%s: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				__func__,
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("%s: client debug mask has been modified to"
				":%s: :%llx:\n",
				__func__,
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	/* not reached: every case above returns */
	return -ENOIOCTLCMD;
}
816
/*
 * Native ioctl entry point: validate the command, then dispatch it.
 */
static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long ret = check_ioctl_command(command);

	if (ret < 0)
		return (int)ret;
	return (int)dispatch_ioctl_command(command, arg);
}
829
830#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
831
/* Compat structure for the ORANGEFS_DEV_MAP ioctl */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;	/* 32-bit user pointer to the shared buffer */
	__s32 total_size;
	__s32 size;
	__s32 count;
};
839
/*
 * Widen a 32-bit ORANGEFS_dev_map_desc32 from a compat caller into a
 * native struct ORANGEFS_dev_map_desc staged on the compat user stack.
 *
 * Returns the userspace address of the translated struct; on failure
 * returns 0 and sets *error to -EFAULT.
 */
static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}
870
871/*
872 * 32 bit user-space apps' ioctl handlers when kernel modules
873 * is compiled as a 64 bit one
874 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500875static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
Mike Marshall5db11c22015-07-17 10:38:12 -0400876 unsigned long args)
877{
878 long ret;
879 unsigned long arg = args;
880
881 /* Check for properly constructed commands */
882 ret = check_ioctl_command(cmd);
883 if (ret < 0)
884 return ret;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500885 if (cmd == ORANGEFS_DEV_MAP) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400886 /*
887 * convert the arguments to what we expect internally
888 * in kernel space
889 */
890 arg = translate_dev_map26(args, &ret);
891 if (ret < 0) {
892 gossip_err("Could not translate dev map\n");
893 return ret;
894 }
895 }
896 /* no other ioctl requires translation */
897 return dispatch_ioctl_command(cmd, arg);
898}
899
Mike Marshall2c590d52015-07-24 10:37:15 -0400900#endif /* CONFIG_COMPAT is in .config */
901
902/*
903 * The following two ioctl32 functions had been refactored into the above
904 * CONFIG_COMPAT ifdef, but that was an over simplification that was
905 * not noticed until we tried to compile on power pc...
906 */
907#if (defined(CONFIG_COMPAT) && !defined(HAVE_REGISTER_IOCTL32_CONVERSION)) || !defined(CONFIG_COMPAT)
/*
 * No-op stub used when there is no ioctl32 conversion sub-system to
 * register (see the surrounding #if); always reports success.
 */
static int orangefs_ioctl32_init(void)
{
	return 0;
}
912
/*
 * No-op counterpart to orangefs_ioctl32_init(): nothing was registered,
 * so there is nothing to tear down.  (The redundant bare "return;" at
 * the end of this void function has been dropped — checkpatch flags
 * it as not useful.)
 */
static void orangefs_ioctl32_cleanup(void)
{
}
Mike Marshall2c590d52015-07-24 10:37:15 -0400917#endif
Mike Marshall5db11c22015-07-17 10:38:12 -0400918
/*
 * The assigned character device major number; chosen dynamically by
 * register_chrdev() in orangefs_dev_init().
 */
static int orangefs_dev_major;
Mike Marshall5db11c22015-07-17 10:38:12 -0400921
922/*
Yi Liu8bb8aef2015-11-24 15:12:14 -0500923 * Initialize orangefs device specific state:
Mike Marshall5db11c22015-07-17 10:38:12 -0400924 * Must be called at module load time only
925 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500926int orangefs_dev_init(void)
Mike Marshall5db11c22015-07-17 10:38:12 -0400927{
928 int ret;
929
930 /* register the ioctl32 sub-system */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500931 ret = orangefs_ioctl32_init();
Mike Marshall5db11c22015-07-17 10:38:12 -0400932 if (ret < 0)
933 return ret;
934
Yi Liu8bb8aef2015-11-24 15:12:14 -0500935 /* register orangefs-req device */
936 orangefs_dev_major = register_chrdev(0,
937 ORANGEFS_REQDEVICE_NAME,
938 &orangefs_devreq_file_operations);
939 if (orangefs_dev_major < 0) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400940 gossip_debug(GOSSIP_DEV_DEBUG,
941 "Failed to register /dev/%s (error %d)\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500942 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
943 orangefs_ioctl32_cleanup();
944 return orangefs_dev_major;
Mike Marshall5db11c22015-07-17 10:38:12 -0400945 }
946
947 gossip_debug(GOSSIP_DEV_DEBUG,
948 "*** /dev/%s character device registered ***\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500949 ORANGEFS_REQDEVICE_NAME);
Mike Marshall5db11c22015-07-17 10:38:12 -0400950 gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500951 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
Mike Marshall5db11c22015-07-17 10:38:12 -0400952 return 0;
953}
954
/*
 * Counterpart to orangefs_dev_init(): unregister the orangefs-req
 * character device, then the ioctl32 sub-system.
 */
void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	/* unregister the ioctl32 sub-system */
	orangefs_ioctl32_cleanup();
}
964
Yi Liu8bb8aef2015-11-24 15:12:14 -0500965static unsigned int orangefs_devreq_poll(struct file *file,
Mike Marshall5db11c22015-07-17 10:38:12 -0400966 struct poll_table_struct *poll_table)
967{
968 int poll_revent_mask = 0;
969
970 if (open_access_count == 1) {
Yi Liu8bb8aef2015-11-24 15:12:14 -0500971 poll_wait(file, &orangefs_request_list_waitq, poll_table);
Mike Marshall5db11c22015-07-17 10:38:12 -0400972
Yi Liu8bb8aef2015-11-24 15:12:14 -0500973 spin_lock(&orangefs_request_list_lock);
974 if (!list_empty(&orangefs_request_list))
Mike Marshall5db11c22015-07-17 10:38:12 -0400975 poll_revent_mask |= POLL_IN;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500976 spin_unlock(&orangefs_request_list_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400977 }
978 return poll_revent_mask;
979}
980
Yi Liu8bb8aef2015-11-24 15:12:14 -0500981const struct file_operations orangefs_devreq_file_operations = {
Mike Marshall5db11c22015-07-17 10:38:12 -0400982 .owner = THIS_MODULE,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500983 .read = orangefs_devreq_read,
984 .write_iter = orangefs_devreq_write_iter,
985 .open = orangefs_devreq_open,
986 .release = orangefs_devreq_release,
987 .unlocked_ioctl = orangefs_devreq_ioctl,
Mike Marshall5db11c22015-07-17 10:38:12 -0400988
989#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500990 .compat_ioctl = orangefs_devreq_compat_ioctl,
Mike Marshall5db11c22015-07-17 10:38:12 -0400991#endif
Yi Liu8bb8aef2015-11-24 15:12:14 -0500992 .poll = orangefs_devreq_poll
Mike Marshall5db11c22015-07-17 10:38:12 -0400993};