blob: 9a439b2e8bde32db429c945d915b9ac9fb1df690 [file] [log] [blame]
Mike Marshall5db11c22015-07-17 10:38:12 -04001/*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * See COPYING in top-level directory.
5 */
6
7/*
8 * Linux VFS file operations.
9 */
10
11#include "protocol.h"
12#include "pvfs2-kernel.h"
13#include "pvfs2-bufmap.h"
14#include <linux/fs.h>
15#include <linux/pagemap.h>
16
/*
 * Mark @op's I/O as completed (under the op lock) and wake whoever is
 * sleeping on op->io_completion_waitq — i.e. the client-core daemon's
 * device-file reader.  NOTE(review): on wakeup the daemon may free the
 * op, so the caller must not touch @op after invoking this.
 */
#define wake_up_daemon_for_return(op)			\
do {							\
	spin_lock(&op->lock);				\
	op->io_completed = 1;				\
	spin_unlock(&op->lock);				\
	wake_up_interruptible(&op->io_completion_waitq);\
} while (0)
24
25/*
26 * Copy to client-core's address space from the buffers specified
27 * by the iovec upto total_size bytes.
28 * NOTE: the iovector can either contain addresses which
29 * can futher be kernel-space or user-space addresses.
30 * or it can pointers to struct page's
31 */
32static int precopy_buffers(struct pvfs2_bufmap *bufmap,
33 int buffer_index,
Al Viroa5c126a2015-10-08 17:54:31 -040034 struct iov_iter *iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -040035 size_t total_size)
Mike Marshall5db11c22015-07-17 10:38:12 -040036{
37 int ret = 0;
Mike Marshall5db11c22015-07-17 10:38:12 -040038 /*
39 * copy data from application/kernel by pulling it out
40 * of the iovec.
41 */
Mike Marshall4d1c4402015-09-04 10:31:16 -040042
43
44 if (total_size) {
Mike Marshall4d1c4402015-09-04 10:31:16 -040045 ret = pvfs_bufmap_copy_from_iovec(bufmap,
Al Viroa5c126a2015-10-08 17:54:31 -040046 iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -040047 buffer_index,
48 total_size);
49 if (ret < 0)
50 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
51 __func__,
52 (long)ret);
Mike Marshall4d1c4402015-09-04 10:31:16 -040053 }
54
Mike Marshall5db11c22015-07-17 10:38:12 -040055 if (ret < 0)
56 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
57 __func__,
58 (long)ret);
59 return ret;
60}
61
62/*
63 * Copy from client-core's address space to the buffers specified
64 * by the iovec upto total_size bytes.
65 * NOTE: the iovector can either contain addresses which
66 * can futher be kernel-space or user-space addresses.
67 * or it can pointers to struct page's
68 */
69static int postcopy_buffers(struct pvfs2_bufmap *bufmap,
70 int buffer_index,
Al Viro5f0e3c92015-10-08 17:52:44 -040071 struct iov_iter *iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -040072 size_t total_size)
Mike Marshall5db11c22015-07-17 10:38:12 -040073{
74 int ret = 0;
Mike Marshall5db11c22015-07-17 10:38:12 -040075 /*
76 * copy data to application/kernel by pushing it out to
77 * the iovec. NOTE; target buffers can be addresses or
78 * struct page pointers.
79 */
80 if (total_size) {
Mike Marshall4d1c4402015-09-04 10:31:16 -040081 ret = pvfs_bufmap_copy_to_iovec(bufmap,
Al Viro5f0e3c92015-10-08 17:52:44 -040082 iter,
Al Viro5c278222015-10-08 17:43:58 -040083 buffer_index,
84 total_size);
Mike Marshall5db11c22015-07-17 10:38:12 -040085 if (ret < 0)
Mike Marshall4d1c4402015-09-04 10:31:16 -040086 gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
Mike Marshall5db11c22015-07-17 10:38:12 -040087 __func__,
88 (long)ret);
89 }
90 return ret;
91}
92
93/*
94 * Post and wait for the I/O upcall to finish
95 */
/*
 * Post an I/O upcall to the client-core daemon and wait for it to
 * complete.
 *
 * @type:           PVFS_IO_READ or PVFS_IO_WRITE.
 * @inode:          target inode.
 * @offset:         file offset for the transfer (read, not advanced here).
 * @iter:           source (write) or destination (read) memory.
 * @total_size:     bytes to transfer; must fit in one bufmap slot.
 * @readahead_size: hint forwarded to the client-core.
 *
 * Returns bytes completed, or a negative errno.  If the client-core is
 * restarted mid-operation (purged op, -EAGAIN) the shared-memory slot
 * is re-acquired and, for writes, re-filled, then the op is re-posted.
 */
static ssize_t wait_for_direct_io(enum PVFS_io_type type, struct inode *inode,
		loff_t *offset, struct iov_iter *iter,
		size_t total_size, loff_t readahead_size)
{
	struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
	struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
	struct pvfs2_bufmap *bufmap = NULL;
	struct pvfs2_kernel_op_s *new_op = NULL;
	int buffer_index = -1;	/* -1 means "no slot held" for cleanup */
	ssize_t ret;

	new_op = op_alloc(PVFS2_VFS_OP_FILE_IO);
	if (!new_op) {
		ret = -ENOMEM;
		goto out;
	}
	/* synchronous I/O */
	new_op->upcall.req.io.async_vfs_io = PVFS_VFS_SYNC_IO;
	new_op->upcall.req.io.readahead_size = readahead_size;
	new_op->upcall.req.io.io_type = type;
	new_op->upcall.req.io.refn = pvfs2_inode->refn;

populate_shared_memory:
	/* get a shared buffer index */
	ret = pvfs_bufmap_get(&bufmap, &buffer_index);
	if (ret < 0) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: pvfs_bufmap_get failure (%ld)\n",
			     __func__, (long)ret);
		goto out;
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): GET op %p -> buffer_index %d\n",
		     __func__,
		     handle,
		     new_op,
		     buffer_index);

	new_op->uses_shared_memory = 1;
	new_op->upcall.req.io.buf_index = buffer_index;
	new_op->upcall.req.io.count = total_size;
	new_op->upcall.req.io.offset = *offset;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): offset: %llu total_size: %zd\n",
		     __func__,
		     handle,
		     llu(*offset),
		     total_size);
	/*
	 * Stage 1: copy the buffers into client-core's address space
	 * precopy_buffers only pertains to writes.
	 */
	if (type == PVFS_IO_WRITE) {
		ret = precopy_buffers(bufmap,
				      buffer_index,
				      iter,
				      total_size);
		if (ret < 0)
			goto out;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Calling post_io_request with tag (%llu)\n",
		     __func__,
		     handle,
		     llu(new_op->tag));

	/* Stage 2: Service the I/O operation */
	ret = service_operation(new_op,
				type == PVFS_IO_WRITE ?
					"file_write" :
					"file_read",
				get_interruptible_flag(inode));

	/*
	 * If service_operation() returns -EAGAIN #and# the operation was
	 * purged from pvfs2_request_list or htable_ops_in_progress, then
	 * we know that the client was restarted, causing the shared memory
	 * area to be wiped clean. To restart a write operation in this
	 * case, we must re-copy the data from the user's iovec to a NEW
	 * shared memory location. To restart a read operation, we must get
	 * a new shared memory location.
	 */
	if (ret == -EAGAIN && op_state_purged(new_op)) {
		/* release the stale slot before looping back for a new one */
		pvfs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s:going to repopulate_shared_memory.\n",
			     __func__);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		handle_io_error();	/* defined in pvfs2-kernel.h */
		/*
		 * don't write an error to syslog on signaled operation
		 * termination unless we've got debugging turned on, as
		 * this can happen regularly (i.e. ctrl-c)
		 */
		if (ret == -EINTR)
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: returning error %ld\n", __func__,
				     (long)ret);
		else
			gossip_err("%s: error in %s handle %pU, returning %zd\n",
				   __func__,
				   type == PVFS_IO_READ ?
					"read from" : "write to",
				   handle, ret);
		goto out;
	}

	/*
	 * Stage 3: Post copy buffers from client-core's address space
	 * postcopy_buffers only pertains to reads.
	 */
	if (type == PVFS_IO_READ) {
		ret = postcopy_buffers(bufmap,
				       buffer_index,
				       iter,
				       new_op->downcall.resp.io.amt_complete);
		if (ret < 0) {
			/*
			 * put error codes in downcall so that handle_io_error()
			 * preserves it properly
			 */
			new_op->downcall.status = ret;
			handle_io_error();
			goto out;
		}
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Amount written as returned by the sys-io call:%d\n",
		     __func__,
		     handle,
		     (int)new_op->downcall.resp.io.amt_complete);

	ret = new_op->downcall.resp.io.amt_complete;

	/*
	 * tell the device file owner waiting on I/O that this read has
	 * completed and it can return now. in this exact case, on
	 * wakeup the daemon will free the op, so we *cannot* touch it
	 * after this.
	 */
	wake_up_daemon_for_return(new_op);
	new_op = NULL;

out:
	if (buffer_index >= 0) {
		pvfs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): PUT buffer_index %d\n",
			     __func__, handle, buffer_index);
		buffer_index = -1;
	}
	if (new_op) {
		op_release(new_op);
		new_op = NULL;
	}
	return ret;
}
258
259/*
260 * The reason we need to do this is to be able to support readv and writev
261 * that are larger than (pvfs_bufmap_size_query()) Default is
262 * PVFS2_BUFMAP_DEFAULT_DESC_SIZE MB. What that means is that we will
263 * create a new io vec descriptor for those memory addresses that
264 * go beyond the limit. Return value for this routine is negative in case
265 * of errors and 0 in case of success.
266 *
267 * Further, the new_nr_segs pointer is updated to hold the new value
268 * of number of iovecs, the new_vec pointer is updated to hold the pointer
269 * to the new split iovec, and the size array is an array of integers holding
270 * the number of iovecs that straddle pvfs_bufmap_size_query().
271 * The max_new_nr_segs value is computed by the caller and returned.
272 * (It will be (count of all iov_len/ block_size) + 1).
273 */
static int split_iovecs(unsigned long max_new_nr_segs,		/* IN */
			unsigned long nr_segs,			/* IN */
			const struct iovec *original_iovec,	/* IN */
			unsigned long *new_nr_segs,		/* OUT */
			struct iovec **new_vec,			/* OUT */
			unsigned long *seg_count,		/* OUT */
			unsigned long **seg_array)		/* OUT */
{
	unsigned long seg;
	/* bytes accumulated toward the current bufmap-sized block */
	unsigned long count = 0;
	unsigned long begin_seg;
	/* number of entries filled into new_iovec so far */
	unsigned long tmpnew_nr_segs = 0;
	struct iovec *new_iovec = NULL;
	struct iovec *orig_iovec;
	/* sizes[i] = how many new iovecs make up block i */
	unsigned long *sizes = NULL;
	unsigned long sizes_count = 0;

	if (nr_segs <= 0 ||
	    original_iovec == NULL ||
	    new_nr_segs == NULL ||
	    new_vec == NULL ||
	    seg_count == NULL ||
	    seg_array == NULL ||
	    max_new_nr_segs <= 0) {
		gossip_err("Invalid parameters to split_iovecs\n");
		return -EINVAL;
	}
	*new_nr_segs = 0;
	*new_vec = NULL;
	*seg_count = 0;
	*seg_array = NULL;
	/*
	 * copy the passed in iovec descriptor to a temp structure —
	 * the splitting loop below mutates iov_base/iov_len in place,
	 * and the caller's iovec must stay untouched.
	 */
	orig_iovec = kmalloc_array(nr_segs,
				   sizeof(*orig_iovec),
				   PVFS2_BUFMAP_GFP_FLAGS);
	if (orig_iovec == NULL) {
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(nr_segs * sizeof(*orig_iovec)));
		return -ENOMEM;
	}
	new_iovec = kcalloc(max_new_nr_segs,
			    sizeof(*new_iovec),
			    PVFS2_BUFMAP_GFP_FLAGS);
	if (new_iovec == NULL) {
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*new_iovec)));
		return -ENOMEM;
	}
	sizes = kcalloc(max_new_nr_segs,
			sizeof(*sizes),
			PVFS2_BUFMAP_GFP_FLAGS);
	if (sizes == NULL) {
		kfree(new_iovec);
		kfree(orig_iovec);
		gossip_err(
		    "split_iovecs: Could not allocate memory for %lu bytes!\n",
		    (unsigned long)(max_new_nr_segs * sizeof(*sizes)));
		return -ENOMEM;
	}
	/* copy the passed in iovec to a temp structure */
	memcpy(orig_iovec, original_iovec, nr_segs * sizeof(*orig_iovec));
	begin_seg = 0;
repeat:
	/*
	 * Walk the (mutable) iovec copy.  Segments that fit in the
	 * remainder of the current block are copied through whole; a
	 * segment that would straddle the block boundary is split:
	 * its head fills the block, the loop restarts at the same
	 * segment (begin_seg) with iov_base/iov_len advanced past the
	 * head, and a new block begins.
	 */
	for (seg = begin_seg; seg < nr_segs; seg++) {
		if (tmpnew_nr_segs >= max_new_nr_segs ||
		    sizes_count >= max_new_nr_segs) {
			/* caller's estimate was too small — bail out */
			kfree(sizes);
			kfree(orig_iovec);
			kfree(new_iovec);
			gossip_err
			    ("split_iovecs: exceeded the index limit (%lu)\n",
			     tmpnew_nr_segs);
			return -EINVAL;
		}
		if (count + orig_iovec[seg].iov_len <
		    pvfs_bufmap_size_query()) {
			count += orig_iovec[seg].iov_len;
			memcpy(&new_iovec[tmpnew_nr_segs],
			       &orig_iovec[seg],
			       sizeof(*new_iovec));
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
		} else {
			/* split: head of this segment completes the block */
			new_iovec[tmpnew_nr_segs].iov_base =
				orig_iovec[seg].iov_base;
			new_iovec[tmpnew_nr_segs].iov_len =
				(pvfs_bufmap_size_query() - count);
			tmpnew_nr_segs++;
			sizes[sizes_count]++;
			sizes_count++;
			begin_seg = seg;
			orig_iovec[seg].iov_base +=
				(pvfs_bufmap_size_query() - count);
			orig_iovec[seg].iov_len -=
				(pvfs_bufmap_size_query() - count);
			count = 0;
			break;
		}
	}
	if (seg != nr_segs)
		goto repeat;
	else
		/* account for the final (possibly partial) block */
		sizes_count++;

	*new_nr_segs = tmpnew_nr_segs;
	/* new_iovec is freed by the caller */
	*new_vec = new_iovec;
	*seg_count = sizes_count;
	/* seg_array is also freed by the caller */
	*seg_array = sizes;
	kfree(orig_iovec);
	return 0;
}
390
391static long bound_max_iovecs(const struct iovec *curr, unsigned long nr_segs,
392 ssize_t *total_count)
393{
394 unsigned long i;
395 long max_nr_iovecs;
396 ssize_t total;
397 ssize_t count;
398
399 total = 0;
400 count = 0;
401 max_nr_iovecs = 0;
402 for (i = 0; i < nr_segs; i++) {
403 const struct iovec *iv = &curr[i];
404
405 count += iv->iov_len;
406 if (unlikely((ssize_t) (count | iv->iov_len) < 0))
407 return -EINVAL;
408 if (total + iv->iov_len < pvfs_bufmap_size_query()) {
409 total += iv->iov_len;
410 max_nr_iovecs++;
411 } else {
412 total =
413 (total + iv->iov_len - pvfs_bufmap_size_query());
414 max_nr_iovecs += (total / pvfs_bufmap_size_query() + 2);
415 }
416 }
417 *total_count = count;
418 return max_nr_iovecs;
419}
420
421/*
422 * Common entry point for read/write/readv/writev
423 * This function will dispatch it to either the direct I/O
424 * or buffered I/O path depending on the mount options and/or
425 * augmented/extended metadata attached to the file.
426 * Note: File extended attributes override any mount options.
427 */
428static ssize_t do_readv_writev(enum PVFS_io_type type, struct file *file,
429 loff_t *offset, const struct iovec *iov, unsigned long nr_segs)
430{
431 struct inode *inode = file->f_mapping->host;
432 struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
433 struct pvfs2_khandle *handle = &pvfs2_inode->refn.khandle;
434 ssize_t ret;
435 ssize_t total_count;
436 unsigned int to_free;
437 size_t count;
438 unsigned long seg;
Mike Marshalleeaa3d42015-07-29 13:36:37 -0400439 unsigned long new_nr_segs;
440 unsigned long max_new_nr_segs;
441 unsigned long seg_count;
442 unsigned long *seg_array;
443 struct iovec *iovecptr;
444 struct iovec *ptr;
Mike Marshall5db11c22015-07-17 10:38:12 -0400445
446 total_count = 0;
447 ret = -EINVAL;
448 count = 0;
449 to_free = 0;
450
451 /* Compute total and max number of segments after split */
452 max_new_nr_segs = bound_max_iovecs(iov, nr_segs, &count);
Mike Marshall5db11c22015-07-17 10:38:12 -0400453
454 gossip_debug(GOSSIP_FILE_DEBUG,
455 "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
456 __func__,
457 handle,
458 (int)count);
459
460 if (type == PVFS_IO_WRITE) {
461 gossip_debug(GOSSIP_FILE_DEBUG,
462 "%s(%pU): proceeding with offset : %llu, "
463 "size %d\n",
464 __func__,
465 handle,
466 llu(*offset),
467 (int)count);
468 }
469
470 if (count == 0) {
471 ret = 0;
472 goto out;
473 }
474
475 /*
476 * if the total size of data transfer requested is greater than
477 * the kernel-set blocksize of PVFS2, then we split the iovecs
478 * such that no iovec description straddles a block size limit
479 */
480
481 gossip_debug(GOSSIP_FILE_DEBUG,
482 "%s: pvfs_bufmap_size:%d\n",
483 __func__,
484 pvfs_bufmap_size_query());
485
486 if (count > pvfs_bufmap_size_query()) {
487 /*
488 * Split up the given iovec description such that
489 * no iovec descriptor straddles over the block-size limitation.
490 * This makes us our job easier to stage the I/O.
491 * In addition, this function will also compute an array
492 * with seg_count entries that will store the number of
493 * segments that straddle the block-size boundaries.
494 */
495 ret = split_iovecs(max_new_nr_segs, /* IN */
496 nr_segs, /* IN */
497 iov, /* IN */
498 &new_nr_segs, /* OUT */
499 &iovecptr, /* OUT */
500 &seg_count, /* OUT */
501 &seg_array); /* OUT */
502 if (ret < 0) {
503 gossip_err("%s: Failed to split iovecs to satisfy larger than blocksize readv/writev request %zd\n",
504 __func__,
505 ret);
506 goto out;
507 }
508 gossip_debug(GOSSIP_FILE_DEBUG,
509 "%s: Splitting iovecs from %lu to %lu"
510 " [max_new %lu]\n",
511 __func__,
512 nr_segs,
513 new_nr_segs,
514 max_new_nr_segs);
515 /* We must free seg_array and iovecptr */
516 to_free = 1;
517 } else {
518 new_nr_segs = nr_segs;
519 /* use the given iovec description */
520 iovecptr = (struct iovec *)iov;
521 /* There is only 1 element in the seg_array */
522 seg_count = 1;
523 /* and its value is the number of segments passed in */
524 seg_array = &nr_segs;
525 /* We dont have to free up anything */
526 to_free = 0;
527 }
528 ptr = iovecptr;
529
530 gossip_debug(GOSSIP_FILE_DEBUG,
531 "%s(%pU) %zd@%llu\n",
532 __func__,
533 handle,
534 count,
535 llu(*offset));
536 gossip_debug(GOSSIP_FILE_DEBUG,
537 "%s(%pU): new_nr_segs: %lu, seg_count: %lu\n",
538 __func__,
539 handle,
540 new_nr_segs, seg_count);
541
542/* PVFS2_KERNEL_DEBUG is a CFLAGS define. */
543#ifdef PVFS2_KERNEL_DEBUG
544 for (seg = 0; seg < new_nr_segs; seg++)
545 gossip_debug(GOSSIP_FILE_DEBUG,
546 "%s: %d) %p to %p [%d bytes]\n",
547 __func__,
548 (int)seg + 1,
549 iovecptr[seg].iov_base,
550 iovecptr[seg].iov_base + iovecptr[seg].iov_len,
551 (int)iovecptr[seg].iov_len);
552 for (seg = 0; seg < seg_count; seg++)
553 gossip_debug(GOSSIP_FILE_DEBUG,
554 "%s: %zd) %lu\n",
555 __func__,
556 seg + 1,
557 seg_array[seg]);
558#endif
559 seg = 0;
560 while (total_count < count) {
Al Viro3c2fcfc2015-10-08 18:00:26 -0400561 struct iov_iter iter;
Mike Marshall5db11c22015-07-17 10:38:12 -0400562 size_t each_count;
563 size_t amt_complete;
564
565 /* how much to transfer in this loop iteration */
566 each_count =
567 (((count - total_count) > pvfs_bufmap_size_query()) ?
568 pvfs_bufmap_size_query() :
569 (count - total_count));
570
571 gossip_debug(GOSSIP_FILE_DEBUG,
572 "%s(%pU): size of each_count(%d)\n",
573 __func__,
574 handle,
575 (int)each_count);
576 gossip_debug(GOSSIP_FILE_DEBUG,
577 "%s(%pU): BEFORE wait_for_io: offset is %d\n",
578 __func__,
579 handle,
580 (int)*offset);
581
Al Viro3c2fcfc2015-10-08 18:00:26 -0400582 iov_iter_init(&iter, type == PVFS_IO_READ ? READ : WRITE,
583 ptr, seg_array[seg], each_count);
584
585 ret = wait_for_direct_io(type, inode, offset, &iter,
586 each_count, 0);
Mike Marshall5db11c22015-07-17 10:38:12 -0400587 gossip_debug(GOSSIP_FILE_DEBUG,
588 "%s(%pU): return from wait_for_io:%d\n",
589 __func__,
590 handle,
591 (int)ret);
592
593 if (ret < 0)
594 goto out;
595
596 /* advance the iovec pointer */
597 ptr += seg_array[seg];
598 seg++;
599 *offset += ret;
600 total_count += ret;
601 amt_complete = ret;
602
603 gossip_debug(GOSSIP_FILE_DEBUG,
604 "%s(%pU): AFTER wait_for_io: offset is %d\n",
605 __func__,
606 handle,
607 (int)*offset);
608
609 /*
610 * if we got a short I/O operations,
611 * fall out and return what we got so far
612 */
613 if (amt_complete < each_count)
614 break;
615 } /*end while */
616
617 if (total_count > 0)
618 ret = total_count;
619out:
620 if (to_free) {
621 kfree(iovecptr);
622 kfree(seg_array);
623 }
624 if (ret > 0) {
625 if (type == PVFS_IO_READ) {
626 file_accessed(file);
627 } else {
628 SetMtimeFlag(pvfs2_inode);
629 inode->i_mtime = CURRENT_TIME;
630 mark_inode_dirty_sync(inode);
631 }
632 }
633
634 gossip_debug(GOSSIP_FILE_DEBUG,
635 "%s(%pU): Value(%d) returned.\n",
636 __func__,
637 handle,
638 (int)ret);
639
640 return ret;
641}
642
643/*
644 * Read data from a specified offset in a file (referenced by inode).
645 * Data may be placed either in a user or kernel buffer.
646 */
647ssize_t pvfs2_inode_read(struct inode *inode,
648 char __user *buf,
649 size_t count,
650 loff_t *offset,
651 loff_t readahead_size)
652{
653 struct pvfs2_inode_s *pvfs2_inode = PVFS2_I(inode);
654 size_t bufmap_size;
655 struct iovec vec;
Al Viro3c2fcfc2015-10-08 18:00:26 -0400656 struct iov_iter iter;
Mike Marshall5db11c22015-07-17 10:38:12 -0400657 ssize_t ret = -EINVAL;
658
659 g_pvfs2_stats.reads++;
660
661 vec.iov_base = buf;
662 vec.iov_len = count;
663
664 bufmap_size = pvfs_bufmap_size_query();
665 if (count > bufmap_size) {
666 gossip_debug(GOSSIP_FILE_DEBUG,
667 "%s: count is too large (%zd/%zd)!\n",
668 __func__, count, bufmap_size);
669 return -EINVAL;
670 }
671
672 gossip_debug(GOSSIP_FILE_DEBUG,
673 "%s(%pU) %zd@%llu\n",
674 __func__,
675 &pvfs2_inode->refn.khandle,
676 count,
677 llu(*offset));
678
Al Viro3c2fcfc2015-10-08 18:00:26 -0400679 iov_iter_init(&iter, READ, &vec, 1, count);
680 ret = wait_for_direct_io(PVFS_IO_READ, inode, offset, &iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -0400681 count, readahead_size);
Mike Marshall5db11c22015-07-17 10:38:12 -0400682 if (ret > 0)
683 *offset += ret;
684
685 gossip_debug(GOSSIP_FILE_DEBUG,
686 "%s(%pU): Value(%zd) returned.\n",
687 __func__,
688 &pvfs2_inode->refn.khandle,
689 ret);
690
691 return ret;
692}
693
694static ssize_t pvfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
695{
696 struct file *file = iocb->ki_filp;
697 loff_t pos = *(&iocb->ki_pos);
698 ssize_t rc = 0;
699 unsigned long nr_segs = iter->nr_segs;
700
701 BUG_ON(iocb->private);
702
703 gossip_debug(GOSSIP_FILE_DEBUG, "pvfs2_file_read_iter\n");
704
705 g_pvfs2_stats.reads++;
706
707 rc = do_readv_writev(PVFS_IO_READ,
708 file,
709 &pos,
710 iter->iov,
711 nr_segs);
712 iocb->ki_pos = pos;
713
714 return rc;
715}
716
717static ssize_t pvfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
718{
719 struct file *file = iocb->ki_filp;
720 loff_t pos = *(&iocb->ki_pos);
721 unsigned long nr_segs = iter->nr_segs;
722 ssize_t rc;
723
724 BUG_ON(iocb->private);
725
726 gossip_debug(GOSSIP_FILE_DEBUG, "pvfs2_file_write_iter\n");
727
728 mutex_lock(&file->f_mapping->host->i_mutex);
729
730 /* Make sure generic_write_checks sees an up to date inode size. */
731 if (file->f_flags & O_APPEND) {
732 rc = pvfs2_inode_getattr(file->f_mapping->host,
733 PVFS_ATTR_SYS_SIZE);
734 if (rc) {
735 gossip_err("%s: pvfs2_inode_getattr failed, rc:%zd:.\n",
736 __func__, rc);
737 goto out;
738 }
739 }
740
741 if (file->f_pos > i_size_read(file->f_mapping->host))
742 pvfs2_i_size_write(file->f_mapping->host, file->f_pos);
743
744 rc = generic_write_checks(iocb, iter);
745
746 if (rc <= 0) {
747 gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
748 __func__, rc);
749 goto out;
750 }
751
752 rc = do_readv_writev(PVFS_IO_WRITE,
753 file,
754 &pos,
755 iter->iov,
756 nr_segs);
757 if (rc < 0) {
758 gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
759 __func__, rc);
760 goto out;
761 }
762
763 iocb->ki_pos = pos;
764 g_pvfs2_stats.writes++;
765
766out:
767
768 mutex_unlock(&file->f_mapping->host->i_mutex);
769 return rc;
770}
771
772/*
773 * Perform a miscellaneous operation on a file.
774 */
Mike Marshall84d02152015-07-28 13:27:51 -0400775static long pvfs2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
Mike Marshall5db11c22015-07-17 10:38:12 -0400776{
777 int ret = -ENOTTY;
778 __u64 val = 0;
779 unsigned long uval;
780
781 gossip_debug(GOSSIP_FILE_DEBUG,
782 "pvfs2_ioctl: called with cmd %d\n",
783 cmd);
784
785 /*
786 * we understand some general ioctls on files, such as the immutable
787 * and append flags
788 */
789 if (cmd == FS_IOC_GETFLAGS) {
790 val = 0;
791 ret = pvfs2_xattr_get_default(file->f_path.dentry,
792 "user.pvfs2.meta_hint",
793 &val,
794 sizeof(val),
795 0);
796 if (ret < 0 && ret != -ENODATA)
797 return ret;
798 else if (ret == -ENODATA)
799 val = 0;
800 uval = val;
801 gossip_debug(GOSSIP_FILE_DEBUG,
802 "pvfs2_ioctl: FS_IOC_GETFLAGS: %llu\n",
803 (unsigned long long)uval);
804 return put_user(uval, (int __user *)arg);
805 } else if (cmd == FS_IOC_SETFLAGS) {
806 ret = 0;
807 if (get_user(uval, (int __user *)arg))
808 return -EFAULT;
809 /*
810 * PVFS_MIRROR_FL is set internally when the mirroring mode
811 * is turned on for a file. The user is not allowed to turn
812 * on this bit, but the bit is present if the user first gets
813 * the flags and then updates the flags with some new
814 * settings. So, we ignore it in the following edit. bligon.
815 */
816 if ((uval & ~PVFS_MIRROR_FL) &
817 (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
818 gossip_err("pvfs2_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
819 return -EINVAL;
820 }
821 val = uval;
822 gossip_debug(GOSSIP_FILE_DEBUG,
823 "pvfs2_ioctl: FS_IOC_SETFLAGS: %llu\n",
824 (unsigned long long)val);
825 ret = pvfs2_xattr_set_default(file->f_path.dentry,
826 "user.pvfs2.meta_hint",
827 &val,
828 sizeof(val),
829 0,
830 0);
831 }
832
833 return ret;
834}
835
836/*
837 * Memory map a region of a file.
838 */
839static int pvfs2_file_mmap(struct file *file, struct vm_area_struct *vma)
840{
841 gossip_debug(GOSSIP_FILE_DEBUG,
842 "pvfs2_file_mmap: called on %s\n",
843 (file ?
844 (char *)file->f_path.dentry->d_name.name :
845 (char *)"Unknown"));
846
847 /* set the sequential readahead hint */
848 vma->vm_flags |= VM_SEQ_READ;
849 vma->vm_flags &= ~VM_RAND_READ;
Martin Brandenburg35390802015-09-30 13:11:54 -0400850
851 /* Use readonly mmap since we cannot support writable maps. */
852 return generic_file_readonly_mmap(file, vma);
Mike Marshall5db11c22015-07-17 10:38:12 -0400853}
854
/* number of pages currently cached in an address_space (i_data) */
#define mapping_nrpages(idata) ((idata)->nrpages)
856
857/*
858 * Called to notify the module that there are no more references to
859 * this file (i.e. no processes have it open).
860 *
861 * \note Not called when each file is closed.
862 */
Mike Marshall84d02152015-07-28 13:27:51 -0400863static int pvfs2_file_release(struct inode *inode, struct file *file)
Mike Marshall5db11c22015-07-17 10:38:12 -0400864{
865 gossip_debug(GOSSIP_FILE_DEBUG,
866 "pvfs2_file_release: called on %s\n",
867 file->f_path.dentry->d_name.name);
868
869 pvfs2_flush_inode(inode);
870
871 /*
Mike Marshall54804942015-10-05 13:44:24 -0400872 * remove all associated inode pages from the page cache and mmap
873 * readahead cache (if any); this forces an expensive refresh of
874 * data for the next caller of mmap (or 'get_block' accesses)
Mike Marshall5db11c22015-07-17 10:38:12 -0400875 */
876 if (file->f_path.dentry->d_inode &&
877 file->f_path.dentry->d_inode->i_mapping &&
878 mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
879 truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
880 0);
881 return 0;
882}
883
884/*
885 * Push all data for a specific file onto permanent storage.
886 */
Mike Marshall84d02152015-07-28 13:27:51 -0400887static int pvfs2_fsync(struct file *file,
888 loff_t start,
889 loff_t end,
890 int datasync)
Mike Marshall5db11c22015-07-17 10:38:12 -0400891{
892 int ret = -EINVAL;
893 struct pvfs2_inode_s *pvfs2_inode =
894 PVFS2_I(file->f_path.dentry->d_inode);
895 struct pvfs2_kernel_op_s *new_op = NULL;
896
897 /* required call */
898 filemap_write_and_wait_range(file->f_mapping, start, end);
899
900 new_op = op_alloc(PVFS2_VFS_OP_FSYNC);
901 if (!new_op)
902 return -ENOMEM;
903 new_op->upcall.req.fsync.refn = pvfs2_inode->refn;
904
905 ret = service_operation(new_op,
906 "pvfs2_fsync",
907 get_interruptible_flag(file->f_path.dentry->d_inode));
908
909 gossip_debug(GOSSIP_FILE_DEBUG,
910 "pvfs2_fsync got return value of %d\n",
911 ret);
912
913 op_release(new_op);
914
915 pvfs2_flush_inode(file->f_path.dentry->d_inode);
916 return ret;
917}
918
919/*
920 * Change the file pointer position for an instance of an open file.
921 *
922 * \note If .llseek is overriden, we must acquire lock as described in
923 * Documentation/filesystems/Locking.
924 *
925 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
926 * require much changes to the FS
927 */
Mike Marshall84d02152015-07-28 13:27:51 -0400928static loff_t pvfs2_file_llseek(struct file *file, loff_t offset, int origin)
Mike Marshall5db11c22015-07-17 10:38:12 -0400929{
930 int ret = -EINVAL;
931 struct inode *inode = file->f_path.dentry->d_inode;
932
933 if (!inode) {
934 gossip_err("pvfs2_file_llseek: invalid inode (NULL)\n");
935 return ret;
936 }
937
938 if (origin == PVFS2_SEEK_END) {
939 /*
940 * revalidate the inode's file size.
941 * NOTE: We are only interested in file size here,
942 * so we set mask accordingly.
943 */
944 ret = pvfs2_inode_getattr(inode, PVFS_ATTR_SYS_SIZE);
945 if (ret) {
946 gossip_debug(GOSSIP_FILE_DEBUG,
947 "%s:%s:%d calling make bad inode\n",
948 __FILE__,
949 __func__,
950 __LINE__);
951 pvfs2_make_bad_inode(inode);
952 return ret;
953 }
954 }
955
956 gossip_debug(GOSSIP_FILE_DEBUG,
Mike Marshall54804942015-10-05 13:44:24 -0400957 "pvfs2_file_llseek: offset is %ld | origin is %d"
958 " | inode size is %lu\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400959 (long)offset,
960 origin,
961 (unsigned long)file->f_path.dentry->d_inode->i_size);
962
963 return generic_file_llseek(file, offset, origin);
964}
965
966/*
967 * Support local locks (locks that only this kernel knows about)
968 * if Orangefs was mounted -o local_lock.
969 */
Mike Marshall84d02152015-07-28 13:27:51 -0400970static int pvfs2_lock(struct file *filp, int cmd, struct file_lock *fl)
Mike Marshall5db11c22015-07-17 10:38:12 -0400971{
Mike Marshallf957ae22015-09-24 12:53:05 -0400972 int rc = -EINVAL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400973
974 if (PVFS2_SB(filp->f_inode->i_sb)->flags & PVFS2_OPT_LOCAL_LOCK) {
975 if (cmd == F_GETLK) {
976 rc = 0;
977 posix_test_lock(filp, fl);
978 } else {
979 rc = posix_lock_file(filp, fl, NULL);
980 }
981 }
982
983 return rc;
984}
985
/*
 * PVFS2 implementation of VFS file operations.  Reads and writes go
 * through the *_iter entry points above; mmap is read-only
 * (generic_file_readonly_mmap), and .lock only takes effect when the
 * fs is mounted with the local_lock option (see pvfs2_lock).
 */
const struct file_operations pvfs2_file_operations = {
	.llseek = pvfs2_file_llseek,
	.read_iter = pvfs2_file_read_iter,
	.write_iter = pvfs2_file_write_iter,
	.lock = pvfs2_lock,
	.unlocked_ioctl = pvfs2_ioctl,
	.mmap = pvfs2_file_mmap,
	.open = generic_file_open,
	.release = pvfs2_file_release,
	.fsync = pvfs2_fsync,
};