blob: e18149f0975b8e09387df16cb2fa567b139dbe6c [file] [log] [blame]
Mike Marshall5db11c22015-07-17 10:38:12 -04001/*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * Changes by Acxiom Corporation to add protocol version to kernel
5 * communication, Copyright Acxiom Corporation, 2005.
6 *
7 * See COPYING in top-level directory.
8 */
9
10#include "protocol.h"
11#include "pvfs2-kernel.h"
12#include "pvfs2-dev-proto.h"
13#include "pvfs2-bufmap.h"
14
15#include <linux/debugfs.h>
16#include <linux/slab.h>
17
18/* this file implements the /dev/pvfs2-req device node */
19
/* Number of processes that currently have the device open (0 or 1). */
static int open_access_count;

/*
 * Loud diagnostic emitted when a second process tries to open
 * /dev/pvfs2-req while the client-core daemon already has it open.
 * Multi-statement macro wrapped in do/while(0) per kernel convention.
 */
#define DUMP_DEVICE_ERROR()                                                \
do {                                                                       \
	gossip_err("*****************************************************\n");\
	gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
	gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
		   "are no ", ORANGEFS_REQDEVICE_NAME);                          \
	gossip_err("instances of a program using this device\ncurrently "  \
		   "running. (You must verify this!)\n");                  \
	gossip_err("For example, you can use the lsof program as follows:\n");\
	gossip_err("'lsof | grep %s' (run this as root)\n",                \
		   ORANGEFS_REQDEVICE_NAME);                                     \
	gossip_err("  open_access_count = %d\n", open_access_count);       \
	gossip_err("*****************************************************\n");\
} while (0)
36
/*
 * Map a 64-bit operation tag to a bucket index in the in-progress
 * hash table.  do_div() divides "tag" in place and returns the
 * remainder, so this is tag % table_size; "tag" is a by-value copy,
 * so the caller's tag is unaffected.
 */
static int hash_func(__u64 tag, int table_size)
{
	return do_div(tag, (unsigned int)table_size);
}
41
Yi Liu8bb8aef2015-11-24 15:12:14 -050042static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
Mike Marshall5db11c22015-07-17 10:38:12 -040043{
44 int index = hash_func(op->tag, hash_table_size);
45
46 spin_lock(&htable_ops_in_progress_lock);
47 list_add_tail(&op->list, &htable_ops_in_progress[index]);
48 spin_unlock(&htable_ops_in_progress_lock);
49}
50
Yi Liu8bb8aef2015-11-24 15:12:14 -050051static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
Mike Marshall5db11c22015-07-17 10:38:12 -040052{
Yi Liu8bb8aef2015-11-24 15:12:14 -050053 struct orangefs_kernel_op_s *op, *next;
Mike Marshall5db11c22015-07-17 10:38:12 -040054 int index;
55
56 index = hash_func(tag, hash_table_size);
57
58 spin_lock(&htable_ops_in_progress_lock);
59 list_for_each_entry_safe(op,
60 next,
61 &htable_ops_in_progress[index],
62 list) {
63 if (op->tag == tag) {
64 list_del(&op->list);
65 spin_unlock(&htable_ops_in_progress_lock);
66 return op;
67 }
68 }
69
70 spin_unlock(&htable_ops_in_progress_lock);
71 return NULL;
72}
73
Yi Liu8bb8aef2015-11-24 15:12:14 -050074static int orangefs_devreq_open(struct inode *inode, struct file *file)
Mike Marshall5db11c22015-07-17 10:38:12 -040075{
76 int ret = -EINVAL;
77
78 if (!(file->f_flags & O_NONBLOCK)) {
Yi Liu8bb8aef2015-11-24 15:12:14 -050079 gossip_err("orangefs: device cannot be opened in blocking mode\n");
Mike Marshall5db11c22015-07-17 10:38:12 -040080 goto out;
81 }
82 ret = -EACCES;
83 gossip_debug(GOSSIP_DEV_DEBUG, "pvfs2-client-core: opening device\n");
84 mutex_lock(&devreq_mutex);
85
86 if (open_access_count == 0) {
87 ret = generic_file_open(inode, file);
88 if (ret == 0)
89 open_access_count++;
90 } else {
91 DUMP_DEVICE_ERROR();
92 }
93 mutex_unlock(&devreq_mutex);
94
95out:
96
97 gossip_debug(GOSSIP_DEV_DEBUG,
98 "pvfs2-client-core: open device complete (ret = %d)\n",
99 ret);
100 return ret;
101}
102
/*
 * Hand the next queued upcall to the userspace client-core.
 *
 * Pops the first serviceable op off orangefs_request_list, marks it
 * in-progress, moves it to the in-progress hash table, then copies
 * [proto_ver | magic | tag | upcall] into the user buffer.
 *
 * Returns MAX_ALIGNED_DEV_REQ_UPSIZE on success, -EAGAIN when no op
 * is ready (or a bogus op was found and dropped), -EINVAL on misuse
 * (blocking open or wrong buffer size), -EFAULT if the copy-out
 * fails (the op is then re-queued).
 */
static ssize_t orangefs_devreq_read(struct file *file,
				    char __user *buf,
				    size_t count, loff_t *offset)
{
	struct orangefs_kernel_op_s *op, *temp;
	__s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	struct orangefs_kernel_op_s *cur_op = NULL;
	unsigned long ret;

	/* We do not support blocking IO. */
	if (!(file->f_flags & O_NONBLOCK)) {
		gossip_err("orangefs: blocking reads are not supported! (pvfs2-client-core bug)\n");
		return -EINVAL;
	}

	/*
	 * The client will do an ioctl to find MAX_ALIGNED_DEV_REQ_UPSIZE, then
	 * always read with that size buffer.
	 */
	if (count != MAX_ALIGNED_DEV_REQ_UPSIZE) {
		gossip_err("orangefs: client-core tried to read wrong size\n");
		return -EINVAL;
	}

	/* Get next op (if any) from top of list. */
	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
		__s32 fsid;
		/* This lock is held past the end of the loop when we break. */
		spin_lock(&op->lock);

		fsid = fsid_of_op(op);
		if (fsid != ORANGEFS_FS_ID_NULL) {
			int ret;
			/* Skip ops whose filesystem needs to be mounted. */
			ret = fs_mount_pending(fsid);
			if (ret == 1) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				spin_unlock(&op->lock);
				continue;
			/* Skip ops whose filesystem we don't know about unless
			 * it is being mounted. */
			/* XXX: is there a better way to detect this? */
			} else if (ret == -1 &&
				   !(op->upcall.type == ORANGEFS_VFS_OP_FS_MOUNT ||
				     op->upcall.type == ORANGEFS_VFS_OP_GETATTR)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "orangefs: skipping op tag %llu %s\n",
					     llu(op->tag), get_opname_string(op));
				gossip_err(
				    "orangefs: ERROR: fs_mount_pending %d\n",
				    fsid);
				spin_unlock(&op->lock);
				continue;
			}
		}
		/*
		 * Either this op does not pertain to a filesystem, is mounting
		 * a filesystem, or pertains to a mounted filesystem. Let it
		 * through.
		 */
		cur_op = op;
		break;
	}

	/*
	 * At this point we either have a valid op and can continue or have not
	 * found an op and must ask the client to try again later.
	 */
	if (!cur_op) {
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
		     llu(cur_op->tag), get_opname_string(cur_op));

	/*
	 * Such an op should never be on the list in the first place. If so, we
	 * will abort.  Note: cur_op->lock is still held from the loop above.
	 */
	if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
		gossip_err("orangefs: ERROR: Current op already queued.\n");
		list_del(&cur_op->list);
		spin_unlock(&cur_op->lock);
		spin_unlock(&orangefs_request_list_lock);
		return -EAGAIN;
	}

	/*
	 * Set the operation to be in progress and move it between lists since
	 * it has been sent to the client.
	 */
	set_op_state_inprogress(cur_op);

	list_del(&cur_op->list);
	spin_unlock(&orangefs_request_list_lock);
	orangefs_devreq_add_op(cur_op);
	spin_unlock(&cur_op->lock);

	/* Push the upcall out: version, magic, tag, then the upcall body. */
	ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
	if (ret != 0)
		goto error;
	ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
			   sizeof(struct orangefs_upcall_s));
	if (ret != 0)
		goto error;

	/* The client only asks to read one size buffer. */
	return MAX_ALIGNED_DEV_REQ_UPSIZE;
error:
	/*
	 * We were unable to copy the op data to the client. Put the op back in
	 * list. If client has crashed, the op will be purged later when the
	 * device is released.
	 */
	gossip_err("orangefs: Failed to copy data to user space\n");
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&cur_op->lock);
	set_op_state_waiting(cur_op);
	orangefs_devreq_remove_op(cur_op->tag);
	list_add(&cur_op->list, &orangefs_request_list);
	spin_unlock(&cur_op->lock);
	spin_unlock(&orangefs_request_list_lock);
	return -EFAULT;
}
239
/*
 * Function for writev() callers into the device.
 *
 * Accepts a downcall from the client-core: iovecs 0..3 carry
 * [proto_ver | magic | tag | downcall struct], with an optional fifth
 * iovec carrying trailer data.  The downcall is matched to the
 * in-progress op by tag, copied into the op, and the op's waiter is
 * woken.  For synchronous file I/O ops this thread also waits (with
 * timeout) for the VFS side to finish using the shared buffers before
 * returning.  Returns the total payload size consumed, or a negative
 * errno (-EPROTO, -ENOMEM, -EMSGSIZE, -EIO).
 */
static ssize_t orangefs_devreq_writev(struct file *file,
				      const struct iovec *iov,
				      size_t count,
				      loff_t *offset)
{
	struct orangefs_kernel_op_s *op = NULL;
	void *buffer = NULL;
	void *ptr = NULL;
	unsigned long i = 0;
	static int max_downsize = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
	int ret = 0, num_remaining = max_downsize;
	int notrailer_count = 4; /* num elements in iovec without trailer */
	int payload_size = 0;
	__s32 magic = 0;
	__s32 proto_ver = 0;
	__u64 tag = 0;
	ssize_t total_returned_size = 0;

	/* Either there is a trailer or there isn't */
	if (count != notrailer_count && count != (notrailer_count + 1)) {
		gossip_err("Error: Number of iov vectors is (%zu) and notrailer count is %d\n",
			count,
			notrailer_count);
		return -EPROTO;
	}
	buffer = dev_req_alloc();
	if (!buffer)
		return -ENOMEM;
	ptr = buffer;

	/* Gather the non-trailer iovecs into one contiguous buffer. */
	for (i = 0; i < notrailer_count; i++) {
		if (iov[i].iov_len > num_remaining) {
			gossip_err
			    ("writev error: Freeing buffer and returning\n");
			dev_req_release(buffer);
			return -EMSGSIZE;
		}
		ret = copy_from_user(ptr, iov[i].iov_base, iov[i].iov_len);
		if (ret) {
			gossip_err("Failed to copy data from user space\n");
			dev_req_release(buffer);
			return -EIO;
		}
		num_remaining -= iov[i].iov_len;
		ptr += iov[i].iov_len;
		payload_size += iov[i].iov_len;
	}
	total_returned_size = payload_size;

	/* these elements are currently 8 byte aligned (8 bytes for (version +
	 * magic) 8 bytes for tag). If you add another element, either
	 * make it 8 bytes big, or use get_unaligned when asigning.
	 */
	ptr = buffer;
	proto_ver = *((__s32 *) ptr);
	ptr += sizeof(__s32);

	magic = *((__s32 *) ptr);
	ptr += sizeof(__s32);

	tag = *((__u64 *) ptr);
	ptr += sizeof(__u64);

	if (magic != ORANGEFS_DEVREQ_MAGIC) {
		gossip_err("Error: Device magic number does not match.\n");
		dev_req_release(buffer);
		return -EPROTO;
	}

	/*
	 * proto_ver = 20902 for 2.9.2
	 */

	/* Match this downcall to the op waiting on its tag. */
	op = orangefs_devreq_remove_op(tag);
	if (op) {
		/* Increase ref count! */
		get_op(op);
		/* cut off magic and tag from payload size */
		payload_size -= (2 * sizeof(__s32) + sizeof(__u64));
		if (payload_size <= sizeof(struct orangefs_downcall_s))
			/* copy the passed in downcall into the op */
			memcpy(&op->downcall,
			       ptr,
			       sizeof(struct orangefs_downcall_s));
		else
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "writev: Ignoring %d bytes\n",
				     payload_size);

		/* Do not allocate needlessly if client-core forgets
		 * to reset trailer size on op errors.
		 */
		if (op->downcall.status == 0 && op->downcall.trailer_size > 0) {
			__u64 trailer_size = op->downcall.trailer_size;
			size_t size;
			gossip_debug(GOSSIP_DEV_DEBUG,
				     "writev: trailer size %ld\n",
				     (unsigned long)trailer_size);
			if (count != (notrailer_count + 1)) {
				gossip_err("Error: trailer size (%ld) is non-zero, no trailer elements though? (%zu)\n", (unsigned long)trailer_size, count);
				dev_req_release(buffer);
				put_op(op);
				return -EPROTO;
			}
			size = iov[notrailer_count].iov_len;
			if (size > trailer_size) {
				gossip_err("writev error: trailer size (%ld) != iov_len (%zd)\n", (unsigned long)trailer_size, size);
				dev_req_release(buffer);
				put_op(op);
				return -EMSGSIZE;
			}
			/* Allocate a buffer large enough to hold the
			 * trailer bytes.
			 */
			op->downcall.trailer_buf = vmalloc(trailer_size);
			if (op->downcall.trailer_buf != NULL) {
				gossip_debug(GOSSIP_DEV_DEBUG, "vmalloc: %p\n",
					     op->downcall.trailer_buf);
				ret = copy_from_user(op->downcall.trailer_buf,
						     iov[notrailer_count].
						     iov_base,
						     size);
				if (ret) {
					gossip_err("Failed to copy trailer data from user space\n");
					dev_req_release(buffer);
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "vfree: %p\n",
						     op->downcall.trailer_buf);
					vfree(op->downcall.trailer_buf);
					op->downcall.trailer_buf = NULL;
					put_op(op);
					return -EIO;
				}
				/* Zero-fill any tail the client didn't send. */
				memset(op->downcall.trailer_buf + size, 0,
				       trailer_size - size);
			} else {
				/* Change downcall status */
				op->downcall.status = -ENOMEM;
				gossip_err("writev: could not vmalloc for trailer!\n");
			}
		}

		/* if this operation is an I/O operation and if it was
		 * initiated on behalf of a *synchronous* VFS I/O operation,
		 * only then we need to wait
		 * for all data to be copied before we can return to avoid
		 * buffer corruption and races that can pull the buffers
		 * out from under us.
		 *
		 * Essentially we're synchronizing with other parts of the
		 * vfs implicitly by not allowing the user space
		 * application reading/writing this device to return until
		 * the buffers are done being used.
		 */
		if (op->upcall.type == ORANGEFS_VFS_OP_FILE_IO &&
		    op->upcall.req.io.async_vfs_io == ORANGEFS_VFS_SYNC_IO) {
			int timed_out = 0;
			DECLARE_WAITQUEUE(wait_entry, current);

			/* tell the vfs op waiting on a waitqueue
			 * that this op is done
			 */
			spin_lock(&op->lock);
			set_op_state_serviced(op);
			spin_unlock(&op->lock);

			add_wait_queue_exclusive(&op->io_completion_waitq,
						 &wait_entry);
			wake_up_interruptible(&op->waitq);

			/*
			 * Wait for the VFS side to set op->io_completed,
			 * bailing out on signal or after op_timeout_secs.
			 */
			while (1) {
				set_current_state(TASK_INTERRUPTIBLE);

				spin_lock(&op->lock);
				if (op->io_completed) {
					spin_unlock(&op->lock);
					break;
				}
				spin_unlock(&op->lock);

				if (!signal_pending(current)) {
					int timeout =
					    MSECS_TO_JIFFIES(1000 *
							     op_timeout_secs);
					if (!schedule_timeout(timeout)) {
						gossip_debug(GOSSIP_DEV_DEBUG, "*** I/O wait time is up\n");
						timed_out = 1;
						break;
					}
					continue;
				}

				gossip_debug(GOSSIP_DEV_DEBUG, "*** signal on I/O wait -- aborting\n");
				break;
			}

			set_current_state(TASK_RUNNING);
			remove_wait_queue(&op->io_completion_waitq,
					  &wait_entry);

			/* NOTE: for I/O operations we handle releasing the op
			 * object except in the case of timeout. the reason we
			 * can't free the op in timeout cases is that the op
			 * service logic in the vfs retries operations using
			 * the same op ptr, thus it can't be freed.
			 */
			if (!timed_out)
				op_release(op);
		} else {

			/*
			 * tell the vfs op waiting on a waitqueue that
			 * this op is done
			 */
			spin_lock(&op->lock);
			set_op_state_serviced(op);
			spin_unlock(&op->lock);
			/*
			 * for every other operation (i.e. non-I/O), we need to
			 * wake up the callers for downcall completion
			 * notification
			 */
			wake_up_interruptible(&op->waitq);
		}
	} else {
		/* ignore downcalls that we're not interested in */
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "WARNING: No one's waiting for tag %llu\n",
			     llu(tag));
	}
	dev_req_release(buffer);

	return total_returned_size;
}
475
Yi Liu8bb8aef2015-11-24 15:12:14 -0500476static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
Mike Marshall5db11c22015-07-17 10:38:12 -0400477 struct iov_iter *iter)
478{
Yi Liu8bb8aef2015-11-24 15:12:14 -0500479 return orangefs_devreq_writev(iocb->ki_filp,
Mike Marshall5db11c22015-07-17 10:38:12 -0400480 iter->iov,
481 iter->nr_segs,
482 &iocb->ki_pos);
483}
484
485/* Returns whether any FS are still pending remounted */
486static int mark_all_pending_mounts(void)
487{
488 int unmounted = 1;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500489 struct orangefs_sb_info_s *orangefs_sb = NULL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400490
Yi Liu8bb8aef2015-11-24 15:12:14 -0500491 spin_lock(&orangefs_superblocks_lock);
492 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400493 /* All of these file system require a remount */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500494 orangefs_sb->mount_pending = 1;
Mike Marshall5db11c22015-07-17 10:38:12 -0400495 unmounted = 0;
496 }
Yi Liu8bb8aef2015-11-24 15:12:14 -0500497 spin_unlock(&orangefs_superblocks_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400498 return unmounted;
499}
500
501/*
502 * Determine if a given file system needs to be remounted or not
503 * Returns -1 on error
504 * 0 if already mounted
505 * 1 if needs remount
506 */
507int fs_mount_pending(__s32 fsid)
508{
509 int mount_pending = -1;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500510 struct orangefs_sb_info_s *orangefs_sb = NULL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400511
Yi Liu8bb8aef2015-11-24 15:12:14 -0500512 spin_lock(&orangefs_superblocks_lock);
513 list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
514 if (orangefs_sb->fs_id == fsid) {
515 mount_pending = orangefs_sb->mount_pending;
Mike Marshall5db11c22015-07-17 10:38:12 -0400516 break;
517 }
518 }
Yi Liu8bb8aef2015-11-24 15:12:14 -0500519 spin_unlock(&orangefs_superblocks_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400520 return mount_pending;
521}
522
523/*
524 * NOTE: gets called when the last reference to this device is dropped.
525 * Using the open_access_count variable, we enforce a reference count
526 * on this file so that it can be opened by only one process at a time.
527 * the devreq_mutex is used to make sure all i/o has completed
Yi Liu8bb8aef2015-11-24 15:12:14 -0500528 * before we call orangefs_bufmap_finalize, and similar such tricky
Mike Marshall5db11c22015-07-17 10:38:12 -0400529 * situations
530 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500531static int orangefs_devreq_release(struct inode *inode, struct file *file)
Mike Marshall5db11c22015-07-17 10:38:12 -0400532{
533 int unmounted = 0;
534
535 gossip_debug(GOSSIP_DEV_DEBUG,
536 "%s:pvfs2-client-core: exiting, closing device\n",
537 __func__);
538
539 mutex_lock(&devreq_mutex);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500540 orangefs_bufmap_finalize();
Mike Marshall5db11c22015-07-17 10:38:12 -0400541
542 open_access_count--;
543
544 unmounted = mark_all_pending_mounts();
Yi Liu8bb8aef2015-11-24 15:12:14 -0500545 gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400546 (unmounted ? "UNMOUNTED" : "MOUNTED"));
547 mutex_unlock(&devreq_mutex);
548
549 /*
550 * Walk through the list of ops in the request list, mark them
551 * as purged and wake them up.
552 */
553 purge_waiting_ops();
554 /*
555 * Walk through the hash table of in progress operations; mark
556 * them as purged and wake them up
557 */
558 purge_inprogress_ops();
559 gossip_debug(GOSSIP_DEV_DEBUG,
560 "pvfs2-client-core: device close complete\n");
561 return 0;
562}
563
564int is_daemon_in_service(void)
565{
566 int in_service;
567
568 /*
569 * What this function does is checks if client-core is alive
570 * based on the access count we maintain on the device.
571 */
572 mutex_lock(&devreq_mutex);
573 in_service = open_access_count == 1 ? 0 : -EIO;
574 mutex_unlock(&devreq_mutex);
575 return in_service;
576}
577
578static inline long check_ioctl_command(unsigned int command)
579{
580 /* Check for valid ioctl codes */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500581 if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400582 gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
583 command,
584 _IOC_TYPE(command),
Yi Liu8bb8aef2015-11-24 15:12:14 -0500585 ORANGEFS_DEV_MAGIC);
Mike Marshall5db11c22015-07-17 10:38:12 -0400586 return -EINVAL;
587 }
588 /* and valid ioctl commands */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500589 if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400590 gossip_err("Invalid ioctl command number [%d >= %d]\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500591 _IOC_NR(command), ORANGEFS_DEV_MAXNR);
Mike Marshall5db11c22015-07-17 10:38:12 -0400592 return -ENOIOCTLCMD;
593 }
594 return 0;
595}
596
/*
 * Execute a validated device ioctl.
 *
 * Handles the protocol-negotiation queries (magic, max up/down sizes),
 * shared-buffer mapping, remount-all, and the debug-mask/debug-string
 * plumbing used by the client-core.  Returns 0 or a command-specific
 * value on success, negative errno on failure, -ENOIOCTLCMD for an
 * unknown command.
 */
static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
{
	static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
	static __s32 max_up_size = MAX_ALIGNED_DEV_REQ_UPSIZE;
	static __s32 max_down_size = MAX_ALIGNED_DEV_REQ_DOWNSIZE;
	struct ORANGEFS_dev_map_desc user_desc;
	int ret = 0;
	struct dev_mask_info_s mask_info = { 0 };
	struct dev_mask2_info_s mask2_info = { 0, 0 };
	int upstream_kmod = 1;
	struct list_head *tmp = NULL;
	struct orangefs_sb_info_s *orangefs_sb = NULL;

	/* mtmoore: add locking here */

	switch (command) {
	/* Simple queries: hand a constant back to userspace. */
	case ORANGEFS_DEV_GET_MAGIC:
		return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_UPSIZE:
		return ((put_user(max_up_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
		return ((put_user(max_down_size,
				  (__s32 __user *) arg) == -EFAULT) ?
			-EIO :
			0);
	/* Map the client-core's shared buffer area into the kernel. */
	case ORANGEFS_DEV_MAP:
		ret = copy_from_user(&user_desc,
				     (struct ORANGEFS_dev_map_desc __user *)
				     arg,
				     sizeof(struct ORANGEFS_dev_map_desc));
		return ret ? -EIO : orangefs_bufmap_initialize(&user_desc);
	case ORANGEFS_DEV_REMOUNT_ALL:
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "orangefs_devreq_ioctl: got ORANGEFS_DEV_REMOUNT_ALL\n");

		/*
		 * remount all mounted orangefs volumes to regain the lost
		 * dynamic mount tables (if any) -- NOTE: this is done
		 * without keeping the superblock list locked due to the
		 * upcall/downcall waiting. also, the request semaphore is
		 * used to ensure that no operations will be serviced until
		 * all of the remounts are serviced (to avoid ops between
		 * mounts to fail)
		 */
		ret = mutex_lock_interruptible(&request_mutex);
		if (ret < 0)
			return ret;
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "orangefs_devreq_ioctl: priority remount in progress\n");
		list_for_each(tmp, &orangefs_superblocks) {
			orangefs_sb =
				list_entry(tmp, struct orangefs_sb_info_s, list);
			if (orangefs_sb && (orangefs_sb->sb)) {
				gossip_debug(GOSSIP_DEV_DEBUG,
					     "Remounting SB %p\n",
					     orangefs_sb);

				ret = orangefs_remount(orangefs_sb->sb);
				if (ret) {
					gossip_debug(GOSSIP_DEV_DEBUG,
						     "SB %p remount failed\n",
						     orangefs_sb);
					break;
				}
			}
		}
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "orangefs_devreq_ioctl: priority remount complete\n");
		mutex_unlock(&request_mutex);
		return ret;

	/* Tell the client-core this is the upstream kernel module. */
	case ORANGEFS_DEV_UPSTREAM:
		ret = copy_to_user((void __user *)arg,
				    &upstream_kmod,
				    sizeof(upstream_kmod));

		if (ret != 0)
			return -EIO;
		else
			return ret;

	/* Client-core pushes its two-part debug mask down to us. */
	case ORANGEFS_DEV_CLIENT_MASK:
		ret = copy_from_user(&mask2_info,
				     (void __user *)arg,
				     sizeof(struct dev_mask2_info_s));

		if (ret != 0)
			return -EIO;

		client_debug_mask.mask1 = mask2_info.mask1_value;
		client_debug_mask.mask2 = mask2_info.mask2_value;

		pr_info("%s: client debug mask has been been received "
			":%llx: :%llx:\n",
			__func__,
			(unsigned long long)client_debug_mask.mask1,
			(unsigned long long)client_debug_mask.mask2);

		return ret;

	/*
	 * Client-core pushes its debug keyword table; on first receipt
	 * rebuild the debugfs help file from it.
	 */
	case ORANGEFS_DEV_CLIENT_STRING:
		ret = copy_from_user(&client_debug_array_string,
				     (void __user *)arg,
				     ORANGEFS_MAX_DEBUG_STRING_LEN);
		if (ret != 0) {
			pr_info("%s: "
				"ORANGEFS_DEV_CLIENT_STRING: copy_from_user failed"
				"\n",
				__func__);
			return -EIO;
		}

		pr_info("%s: client debug array string has been been received."
			"\n",
			__func__);

		if (!help_string_initialized) {

			/* Free the "we don't know yet" default string... */
			kfree(debug_help_string);

			/* build a proper debug help string */
			if (orangefs_prepare_debugfs_help_string(0)) {
				gossip_err("%s: "
					   "prepare_debugfs_help_string failed"
					   "\n",
					   __func__);
				return -EIO;
			}

			/* Replace the boilerplate boot-time debug-help file. */
			debugfs_remove(help_file_dentry);

			help_file_dentry =
				debugfs_create_file(
					ORANGEFS_KMOD_DEBUG_HELP_FILE,
					0444,
					debug_dir,
					debug_help_string,
					&debug_help_fops);

			if (!help_file_dentry) {
				gossip_err("%s: debugfs_create_file failed for"
					   " :%s:!\n",
					   __func__,
					   ORANGEFS_KMOD_DEBUG_HELP_FILE);
				return -EIO;
			}
		}

		debug_mask_to_string(&client_debug_mask, 1);

		debugfs_remove(client_debug_dentry);

		orangefs_client_debug_init();

		help_string_initialized++;

		return ret;

	/* Set either the kernel or the client gossip debug mask. */
	case ORANGEFS_DEV_DEBUG:
		ret = copy_from_user(&mask_info,
				     (void __user *)arg,
				     sizeof(mask_info));

		if (ret != 0)
			return -EIO;

		if (mask_info.mask_type == KERNEL_MASK) {
			if ((mask_info.mask_value == 0)
			    && (kernel_mask_set_mod_init)) {
				/*
				 * the kernel debug mask was set when the
				 * kernel module was loaded; don't override
				 * it if the client-core was started without
				 * a value for ORANGEFS_KMODMASK.
				 */
				return 0;
			}
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			gossip_debug_mask = mask_info.mask_value;
			pr_info("ORANGEFS: kernel debug mask has been modified to "
				":%s: :%llx:\n",
				kernel_debug_string,
				(unsigned long long)gossip_debug_mask);
		} else if (mask_info.mask_type == CLIENT_MASK) {
			debug_mask_to_string(&mask_info.mask_value,
					     mask_info.mask_type);
			pr_info("ORANGEFS: client debug mask has been modified to"
				":%s: :%llx:\n",
				client_debug_string,
				llu(mask_info.mask_value));
		} else {
			gossip_lerr("Invalid mask type....\n");
			return -EINVAL;
		}

		return ret;

	default:
		return -ENOIOCTLCMD;
	}
	return -ENOIOCTLCMD;
}
807
/* Main ioctl entry point: validate the command number, then dispatch. */
static long orangefs_devreq_ioctl(struct file *file,
				  unsigned int command, unsigned long arg)
{
	long err = check_ioctl_command(command);

	if (err < 0)
		return (int)err;

	return (int)dispatch_ioctl_command(command, arg);
}
820
821#ifdef CONFIG_COMPAT /* CONFIG_COMPAT is in .config */
822
/*
 * Compat structure for the ORANGEFS_DEV_MAP ioctl: the 32-bit
 * userspace layout of struct ORANGEFS_dev_map_desc, with the pointer
 * field narrowed to compat_uptr_t.
 */
struct ORANGEFS_dev_map_desc32 {
	compat_uptr_t ptr;	/* 32-bit equivalent of the 64-bit desc's ptr */
	__s32 total_size;
	__s32 size;
	__s32 count;
};
830
/*
 * Translate a 32-bit ORANGEFS_dev_map_desc32 supplied by a compat
 * process into a native struct ORANGEFS_dev_map_desc living in
 * compat user space, so dispatch_ioctl_command() can consume it.
 *
 * On success returns the user-space address of the translated
 * descriptor and sets *error to 0; on any user-copy fault returns 0
 * and sets *error to -EFAULT.
 */
static unsigned long translate_dev_map26(unsigned long args, long *error)
{
	struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
	/*
	 * Depending on the architecture, allocate some space on the
	 * user-call-stack based on our expected layout.
	 */
	struct ORANGEFS_dev_map_desc __user *p =
	    compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;

	*error = 0;
	/* get the ptr from the 32 bit user-space */
	if (get_user(addr, &p32->ptr))
		goto err;
	/* try to put that into a 64-bit layout */
	if (put_user(compat_ptr(addr), &p->ptr))
		goto err;
	/* copy the remaining fields */
	if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
		goto err;
	if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
		goto err;
	return (unsigned long)p;
err:
	*error = -EFAULT;
	return 0;
}
861
862/*
863 * 32 bit user-space apps' ioctl handlers when kernel modules
864 * is compiled as a 64 bit one
865 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500866static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
Mike Marshall5db11c22015-07-17 10:38:12 -0400867 unsigned long args)
868{
869 long ret;
870 unsigned long arg = args;
871
872 /* Check for properly constructed commands */
873 ret = check_ioctl_command(cmd);
874 if (ret < 0)
875 return ret;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500876 if (cmd == ORANGEFS_DEV_MAP) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400877 /*
878 * convert the arguments to what we expect internally
879 * in kernel space
880 */
881 arg = translate_dev_map26(args, &ret);
882 if (ret < 0) {
883 gossip_err("Could not translate dev map\n");
884 return ret;
885 }
886 }
887 /* no other ioctl requires translation */
888 return dispatch_ioctl_command(cmd, arg);
889}
890
Mike Marshall2c590d52015-07-24 10:37:15 -0400891#endif /* CONFIG_COMPAT is in .config */
892
893/*
894 * The following two ioctl32 functions had been refactored into the above
895 * CONFIG_COMPAT ifdef, but that was an over simplification that was
896 * not noticed until we tried to compile on power pc...
897 */
#if (defined(CONFIG_COMPAT) && !defined(HAVE_REGISTER_IOCTL32_CONVERSION)) || !defined(CONFIG_COMPAT)
/*
 * On configurations that need no explicit ioctl32 registration these
 * stubs keep the init/cleanup call sites in orangefs_dev_init() and
 * orangefs_dev_cleanup() unconditional.
 */
static int orangefs_ioctl32_init(void)
{
	return 0;
}

static void orangefs_ioctl32_cleanup(void)
{
}
#endif
Mike Marshall5db11c22015-07-17 10:38:12 -0400909
/* the assigned character device major number */
/* zero until orangefs_dev_init() registers the chrdev and stores it */
static int orangefs_dev_major;
Mike Marshall5db11c22015-07-17 10:38:12 -0400912
913/*
Yi Liu8bb8aef2015-11-24 15:12:14 -0500914 * Initialize orangefs device specific state:
Mike Marshall5db11c22015-07-17 10:38:12 -0400915 * Must be called at module load time only
916 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500917int orangefs_dev_init(void)
Mike Marshall5db11c22015-07-17 10:38:12 -0400918{
919 int ret;
920
921 /* register the ioctl32 sub-system */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500922 ret = orangefs_ioctl32_init();
Mike Marshall5db11c22015-07-17 10:38:12 -0400923 if (ret < 0)
924 return ret;
925
Yi Liu8bb8aef2015-11-24 15:12:14 -0500926 /* register orangefs-req device */
927 orangefs_dev_major = register_chrdev(0,
928 ORANGEFS_REQDEVICE_NAME,
929 &orangefs_devreq_file_operations);
930 if (orangefs_dev_major < 0) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400931 gossip_debug(GOSSIP_DEV_DEBUG,
932 "Failed to register /dev/%s (error %d)\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500933 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
934 orangefs_ioctl32_cleanup();
935 return orangefs_dev_major;
Mike Marshall5db11c22015-07-17 10:38:12 -0400936 }
937
938 gossip_debug(GOSSIP_DEV_DEBUG,
939 "*** /dev/%s character device registered ***\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500940 ORANGEFS_REQDEVICE_NAME);
Mike Marshall5db11c22015-07-17 10:38:12 -0400941 gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
Yi Liu8bb8aef2015-11-24 15:12:14 -0500942 ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
Mike Marshall5db11c22015-07-17 10:38:12 -0400943 return 0;
944}
945
/*
 * Tear down device state set up by orangefs_dev_init(): drop the
 * character device first so no new opens arrive, then unwind the
 * ioctl32 sub-system.  Module-unload time only.
 */
void orangefs_dev_cleanup(void)
{
	unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "*** /dev/%s character device unregistered ***\n",
		     ORANGEFS_REQDEVICE_NAME);
	/* unregister the ioctl32 sub-system */
	orangefs_ioctl32_cleanup();
}
955
Yi Liu8bb8aef2015-11-24 15:12:14 -0500956static unsigned int orangefs_devreq_poll(struct file *file,
Mike Marshall5db11c22015-07-17 10:38:12 -0400957 struct poll_table_struct *poll_table)
958{
959 int poll_revent_mask = 0;
960
961 if (open_access_count == 1) {
Yi Liu8bb8aef2015-11-24 15:12:14 -0500962 poll_wait(file, &orangefs_request_list_waitq, poll_table);
Mike Marshall5db11c22015-07-17 10:38:12 -0400963
Yi Liu8bb8aef2015-11-24 15:12:14 -0500964 spin_lock(&orangefs_request_list_lock);
965 if (!list_empty(&orangefs_request_list))
Mike Marshall5db11c22015-07-17 10:38:12 -0400966 poll_revent_mask |= POLL_IN;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500967 spin_unlock(&orangefs_request_list_lock);
Mike Marshall5db11c22015-07-17 10:38:12 -0400968 }
969 return poll_revent_mask;
970}
971
/*
 * File operations for the /dev/pvfs2-req character device through
 * which the user-space client-core exchanges requests with this
 * kernel module.  Registered by orangefs_dev_init() via
 * register_chrdev().
 */
const struct file_operations orangefs_devreq_file_operations = {
	.owner = THIS_MODULE,
	.read = orangefs_devreq_read,
	.write_iter = orangefs_devreq_write_iter,
	.open = orangefs_devreq_open,
	.release = orangefs_devreq_release,
	.unlocked_ioctl = orangefs_devreq_ioctl,

#ifdef CONFIG_COMPAT		/* CONFIG_COMPAT is in .config */
	.compat_ioctl = orangefs_devreq_compat_ioctl,
#endif
	.poll = orangefs_devreq_poll
};