blob: 40b38057b826eff4badd9d8bfa517e0dbac93ce0 [file] [log] [blame]
Mike Marshall5db11c22015-07-17 10:38:12 -04001/*
2 * (C) 2001 Clemson University and The University of Chicago
3 *
4 * See COPYING in top-level directory.
5 */
6
7/*
8 * Linux VFS file operations.
9 */
10
11#include "protocol.h"
Mike Marshall575e9462015-12-04 12:56:14 -050012#include "orangefs-kernel.h"
13#include "orangefs-bufmap.h"
Mike Marshall5db11c22015-07-17 10:38:12 -040014#include <linux/fs.h>
15#include <linux/pagemap.h>
16
Mike Marshall5db11c22015-07-17 10:38:12 -040017/*
18 * Copy to client-core's address space from the buffers specified
19 * by the iovec upto total_size bytes.
20 * NOTE: the iovector can either contain addresses which
21 * can futher be kernel-space or user-space addresses.
22 * or it can pointers to struct page's
23 */
Yi Liu8bb8aef2015-11-24 15:12:14 -050024static int precopy_buffers(struct orangefs_bufmap *bufmap,
Mike Marshall5db11c22015-07-17 10:38:12 -040025 int buffer_index,
Al Viroa5c126a2015-10-08 17:54:31 -040026 struct iov_iter *iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -040027 size_t total_size)
Mike Marshall5db11c22015-07-17 10:38:12 -040028{
29 int ret = 0;
Mike Marshall5db11c22015-07-17 10:38:12 -040030 /*
31 * copy data from application/kernel by pulling it out
32 * of the iovec.
33 */
Mike Marshall4d1c4402015-09-04 10:31:16 -040034
35
36 if (total_size) {
Yi Liu8bb8aef2015-11-24 15:12:14 -050037 ret = orangefs_bufmap_copy_from_iovec(bufmap,
38 iter,
39 buffer_index,
40 total_size);
Mike Marshall4d1c4402015-09-04 10:31:16 -040041 if (ret < 0)
42 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
43 __func__,
44 (long)ret);
Mike Marshall4d1c4402015-09-04 10:31:16 -040045 }
46
Mike Marshall5db11c22015-07-17 10:38:12 -040047 if (ret < 0)
48 gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
49 __func__,
50 (long)ret);
51 return ret;
52}
53
54/*
55 * Copy from client-core's address space to the buffers specified
56 * by the iovec upto total_size bytes.
57 * NOTE: the iovector can either contain addresses which
58 * can futher be kernel-space or user-space addresses.
59 * or it can pointers to struct page's
60 */
Yi Liu8bb8aef2015-11-24 15:12:14 -050061static int postcopy_buffers(struct orangefs_bufmap *bufmap,
Mike Marshall5db11c22015-07-17 10:38:12 -040062 int buffer_index,
Al Viro5f0e3c92015-10-08 17:52:44 -040063 struct iov_iter *iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -040064 size_t total_size)
Mike Marshall5db11c22015-07-17 10:38:12 -040065{
66 int ret = 0;
Mike Marshall5db11c22015-07-17 10:38:12 -040067 /*
68 * copy data to application/kernel by pushing it out to
69 * the iovec. NOTE; target buffers can be addresses or
70 * struct page pointers.
71 */
72 if (total_size) {
Yi Liu8bb8aef2015-11-24 15:12:14 -050073 ret = orangefs_bufmap_copy_to_iovec(bufmap,
74 iter,
75 buffer_index,
76 total_size);
Mike Marshall5db11c22015-07-17 10:38:12 -040077 if (ret < 0)
Mike Marshall4d1c4402015-09-04 10:31:16 -040078 gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
Mike Marshall5db11c22015-07-17 10:38:12 -040079 __func__,
80 (long)ret);
81 }
82 return ret;
83}
84
/*
 * handles two possible error cases, depending on context.
 *
 * by design, our vfs i/o errors need to be handled in one of two ways,
 * depending on where the error occurred.
 *
 * if the error happens in the waitqueue code because we either timed
 * out or a signal was raised while waiting, we need to cancel the
 * userspace i/o operation and free the op manually. this is done to
 * avoid having the device start writing application data to our shared
 * bufmap pages without us expecting it.
 *
 * FIXME: POSSIBLE OPTIMIZATION:
 * However, if we timed out or if we got a signal AND our upcall was never
 * picked off the queue (i.e. we were in OP_VFS_STATE_WAITING), then we don't
 * need to send a cancellation upcall. The way we can handle this is
 * set error_exit to 2 in such cases and 1 whenever cancellation has to be
 * sent and have handle_error
 * take care of this situation as well..
 *
 * if an orangefs sysint level error occurred and i/o has been completed,
 * there is no need to cancel the operation, as the user has finished
 * using the bufmap page and so there is no danger in this case. in
 * this case, we wake up the device normally so that it may free the
 * op, as normal.
 *
 * note the only reason this is a macro is because both read and write
 * cases need the exact same handling code.
 *
 * NOTE(review): as a macro this relies on new_op, bufmap and
 * buffer_index being in scope at every expansion site; it also resets
 * buffer_index to -1 so the caller's cleanup path won't double-put.
 */
#define handle_io_error() \
do { \
	if (!op_state_serviced(new_op)) { \
		orangefs_cancel_op_in_progress(new_op->tag); \
	} else { \
		complete(&new_op->done); \
	} \
	orangefs_bufmap_put(bufmap, buffer_index); \
	buffer_index = -1; \
} while (0)
124
/*
 * Post an I/O upcall to the client-core and wait for it to finish.
 *
 * Acquires a shared bufmap slot, copies data in (writes) before the
 * upcall and out (reads) after it, and retries from scratch if the
 * client-core was restarted while the op was queued.  Returns the
 * number of bytes completed, or a negative errno.
 */
static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
		loff_t *offset, struct iov_iter *iter,
		size_t total_size, loff_t readahead_size)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	struct orangefs_bufmap *bufmap = NULL;
	struct orangefs_kernel_op_s *new_op = NULL;
	int buffer_index = -1;	/* -1 means "no slot held" on cleanup */
	ssize_t ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
	if (!new_op)
		return -ENOMEM;

	/* synchronous I/O */
	new_op->upcall.req.io.async_vfs_io = ORANGEFS_VFS_SYNC_IO;
	new_op->upcall.req.io.readahead_size = readahead_size;
	new_op->upcall.req.io.io_type = type;
	new_op->upcall.req.io.refn = orangefs_inode->refn;

populate_shared_memory:
	/* get a shared buffer index */
	ret = orangefs_bufmap_get(&bufmap, &buffer_index);
	if (ret < 0) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: orangefs_bufmap_get failure (%ld)\n",
			     __func__, (long)ret);
		goto out;
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): GET op %p -> buffer_index %d\n",
		     __func__,
		     handle,
		     new_op,
		     buffer_index);

	new_op->uses_shared_memory = 1;
	new_op->upcall.req.io.buf_index = buffer_index;
	new_op->upcall.req.io.count = total_size;
	new_op->upcall.req.io.offset = *offset;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): offset: %llu total_size: %zd\n",
		     __func__,
		     handle,
		     llu(*offset),
		     total_size);
	/*
	 * Stage 1: copy the buffers into client-core's address space
	 * precopy_buffers only pertains to writes.
	 */
	if (type == ORANGEFS_IO_WRITE) {
		ret = precopy_buffers(bufmap,
				      buffer_index,
				      iter,
				      total_size);
		if (ret < 0)
			goto out;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Calling post_io_request with tag (%llu)\n",
		     __func__,
		     handle,
		     llu(new_op->tag));

	/* Stage 2: Service the I/O operation */
	ret = service_operation(new_op,
				type == ORANGEFS_IO_WRITE ?
					"file_write" :
					"file_read",
				get_interruptible_flag(inode));

	/*
	 * If service_operation() returns -EAGAIN #and# the operation was
	 * purged from orangefs_request_list or htable_ops_in_progress, then
	 * we know that the client was restarted, causing the shared memory
	 * area to be wiped clean. To restart a write operation in this
	 * case, we must re-copy the data from the user's iovec to a NEW
	 * shared memory location. To restart a read operation, we must get
	 * a new shared memory location.
	 */
	if (ret == -EAGAIN && op_state_purged(new_op)) {
		/* drop the (now stale) slot before retrying */
		orangefs_bufmap_put(bufmap, buffer_index);
		buffer_index = -1;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s:going to repopulate_shared_memory.\n",
			     __func__);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		handle_io_error();
		/*
		 * don't write an error to syslog on signaled operation
		 * termination unless we've got debugging turned on, as
		 * this can happen regularly (i.e. ctrl-c)
		 */
		if (ret == -EINTR)
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: returning error %ld\n", __func__,
				     (long)ret);
		else
			gossip_err("%s: error in %s handle %pU, returning %zd\n",
				   __func__,
				   type == ORANGEFS_IO_READ ?
					"read from" : "write to",
				   handle, ret);
		goto out;
	}

	/*
	 * Stage 3: Post copy buffers from client-core's address space
	 * postcopy_buffers only pertains to reads.
	 */
	if (type == ORANGEFS_IO_READ) {
		ret = postcopy_buffers(bufmap,
				       buffer_index,
				       iter,
				       new_op->downcall.resp.io.amt_complete);
		if (ret < 0) {
			/*
			 * put error codes in downcall so that handle_io_error()
			 * preserves it properly
			 */
			WARN_ON(!op_state_serviced(new_op));
			new_op->downcall.status = ret;
			handle_io_error();
			goto out;
		}
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Amount written as returned by the sys-io call:%d\n",
		     __func__,
		     handle,
		     (int)new_op->downcall.resp.io.amt_complete);

	ret = new_op->downcall.resp.io.amt_complete;

	/*
	 * tell the device file owner waiting on I/O that this read has
	 * completed and it can return now.
	 */
	complete(&new_op->done);

out:
	if (buffer_index >= 0) {
		orangefs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): PUT buffer_index %d\n",
			     __func__, handle, buffer_index);
		buffer_index = -1;
	}
	op_release(new_op);
	return ret;
}
285
/*
 * Common entry point for read/write/readv/writev
 * This function will dispatch it to either the direct I/O
 * or buffered I/O path depending on the mount options and/or
 * augmented/extended metadata attached to the file.
 * Note: File extended attributes override any mount options.
 *
 * Transfers are broken into bufmap-sized chunks; *offset is advanced
 * as chunks complete, and a short transfer ends the loop early.
 * Returns total bytes transferred, 0 for an empty request, or a
 * negative errno from the first failing chunk.
 */
static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
		loff_t *offset, struct iov_iter *iter)
{
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
		     __func__,
		     handle,
		     (int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): proceeding with offset : %llu, "
			     "size %d\n",
			     __func__,
			     handle,
			     llu(*offset),
			     (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

	/* transfer one bufmap-sized chunk per iteration */
	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): size of each_count(%d)\n",
			     __func__,
			     handle,
			     (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
					 each_count, 0);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): return from wait_for_io:%d\n",
			     __func__,
			     handle,
			     (int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		/*
		 * if we got a short I/O operations,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /*end while */

	if (total_count > 0)
		ret = total_count;
out:
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			/* mark mtime dirty so it is flushed to the server */
			SetMtimeFlag(orangefs_inode);
			inode->i_mtime = CURRENT_TIME;
			mark_inode_dirty_sync(inode);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%d) returned.\n",
		     __func__,
		     handle,
		     (int)ret);

	return ret;
}
393
394/*
395 * Read data from a specified offset in a file (referenced by inode).
396 * Data may be placed either in a user or kernel buffer.
397 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500398ssize_t orangefs_inode_read(struct inode *inode,
399 struct iov_iter *iter,
400 loff_t *offset,
401 loff_t readahead_size)
Mike Marshall5db11c22015-07-17 10:38:12 -0400402{
Yi Liu8bb8aef2015-11-24 15:12:14 -0500403 struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
Al Viro74f68fc2015-10-08 18:31:05 -0400404 size_t count = iov_iter_count(iter);
Mike Marshall5db11c22015-07-17 10:38:12 -0400405 size_t bufmap_size;
Mike Marshall5db11c22015-07-17 10:38:12 -0400406 ssize_t ret = -EINVAL;
407
Yi Liu8bb8aef2015-11-24 15:12:14 -0500408 g_orangefs_stats.reads++;
Mike Marshall5db11c22015-07-17 10:38:12 -0400409
Yi Liu8bb8aef2015-11-24 15:12:14 -0500410 bufmap_size = orangefs_bufmap_size_query();
Mike Marshall5db11c22015-07-17 10:38:12 -0400411 if (count > bufmap_size) {
412 gossip_debug(GOSSIP_FILE_DEBUG,
413 "%s: count is too large (%zd/%zd)!\n",
414 __func__, count, bufmap_size);
415 return -EINVAL;
416 }
417
418 gossip_debug(GOSSIP_FILE_DEBUG,
419 "%s(%pU) %zd@%llu\n",
420 __func__,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500421 &orangefs_inode->refn.khandle,
Mike Marshall5db11c22015-07-17 10:38:12 -0400422 count,
423 llu(*offset));
424
Yi Liu8bb8aef2015-11-24 15:12:14 -0500425 ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
Mike Marshall4d1c4402015-09-04 10:31:16 -0400426 count, readahead_size);
Mike Marshall5db11c22015-07-17 10:38:12 -0400427 if (ret > 0)
428 *offset += ret;
429
430 gossip_debug(GOSSIP_FILE_DEBUG,
431 "%s(%pU): Value(%zd) returned.\n",
432 __func__,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500433 &orangefs_inode->refn.khandle,
Mike Marshall5db11c22015-07-17 10:38:12 -0400434 ret);
435
436 return ret;
437}
438
Yi Liu8bb8aef2015-11-24 15:12:14 -0500439static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
Mike Marshall5db11c22015-07-17 10:38:12 -0400440{
441 struct file *file = iocb->ki_filp;
442 loff_t pos = *(&iocb->ki_pos);
443 ssize_t rc = 0;
Mike Marshall5db11c22015-07-17 10:38:12 -0400444
445 BUG_ON(iocb->private);
446
Yi Liu8bb8aef2015-11-24 15:12:14 -0500447 gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
Mike Marshall5db11c22015-07-17 10:38:12 -0400448
Yi Liu8bb8aef2015-11-24 15:12:14 -0500449 g_orangefs_stats.reads++;
Mike Marshall5db11c22015-07-17 10:38:12 -0400450
Yi Liu8bb8aef2015-11-24 15:12:14 -0500451 rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
Mike Marshall5db11c22015-07-17 10:38:12 -0400452 iocb->ki_pos = pos;
453
454 return rc;
455}
456
/*
 * VFS ->write_iter: validate the write under i_mutex, then dispatch
 * through do_readv_writev().  For O_APPEND the inode size is refreshed
 * from the server first so generic_write_checks() positions correctly.
 */
static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	ssize_t rc;

	BUG_ON(iocb->private);

	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");

	mutex_lock(&file->f_mapping->host->i_mutex);

	/* Make sure generic_write_checks sees an up to date inode size. */
	if (file->f_flags & O_APPEND) {
		rc = orangefs_inode_getattr(file->f_mapping->host,
					    ORANGEFS_ATTR_SYS_SIZE, 0);
		if (rc) {
			gossip_err("%s: orangefs_inode_getattr failed, rc:%zd:.\n",
				   __func__, rc);
			goto out;
		}
	}

	/* keep the in-core size at least as large as our write position */
	if (file->f_pos > i_size_read(file->f_mapping->host))
		orangefs_i_size_write(file->f_mapping->host, file->f_pos);

	rc = generic_write_checks(iocb, iter);

	if (rc <= 0) {
		gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	/*
	 * if we are appending, generic_write_checks would have updated
	 * pos to the end of the file, so we will wait till now to set
	 * pos...
	 */
	pos = *(&iocb->ki_pos);

	rc = do_readv_writev(ORANGEFS_IO_WRITE,
			     file,
			     &pos,
			     iter);
	if (rc < 0) {
		gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	iocb->ki_pos = pos;
	g_orangefs_stats.writes++;

out:

	mutex_unlock(&file->f_mapping->host->i_mutex);
	return rc;
}
516
/*
 * Perform a miscellaneous operation on a file.
 *
 * Only FS_IOC_GETFLAGS and FS_IOC_SETFLAGS are handled; the flag word
 * is persisted in the "user.pvfs2.meta_hint" extended attribute.
 * Anything else returns -ENOTTY.
 */
static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = -ENOTTY;
	__u64 val = 0;
	unsigned long uval;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_ioctl: called with cmd %d\n",
		     cmd);

	/*
	 * we understand some general ioctls on files, such as the immutable
	 * and append flags
	 */
	if (cmd == FS_IOC_GETFLAGS) {
		val = 0;
		ret = orangefs_inode_getxattr(file_inode(file),
					      ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
					      "user.pvfs2.meta_hint",
					      &val, sizeof(val));
		/* a missing xattr just means "no flags set" */
		if (ret < 0 && ret != -ENODATA)
			return ret;
		else if (ret == -ENODATA)
			val = 0;
		uval = val;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
			     (unsigned long long)uval);
		return put_user(uval, (int __user *)arg);
	} else if (cmd == FS_IOC_SETFLAGS) {
		ret = 0;
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		/*
		 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
		 * is turned on for a file. The user is not allowed to turn
		 * on this bit, but the bit is present if the user first gets
		 * the flags and then updates the flags with some new
		 * settings. So, we ignore it in the following edit. bligon.
		 */
		if ((uval & ~ORANGEFS_MIRROR_FL) &
		    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
			gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
			return -EINVAL;
		}
		val = uval;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
			     (unsigned long long)val);
		ret = orangefs_inode_setxattr(file_inode(file),
					      ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
					      "user.pvfs2.meta_hint",
					      &val, sizeof(val), 0);
	}

	return ret;
}
577
578/*
579 * Memory map a region of a file.
580 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500581static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
Mike Marshall5db11c22015-07-17 10:38:12 -0400582{
583 gossip_debug(GOSSIP_FILE_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500584 "orangefs_file_mmap: called on %s\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400585 (file ?
586 (char *)file->f_path.dentry->d_name.name :
587 (char *)"Unknown"));
588
589 /* set the sequential readahead hint */
590 vma->vm_flags |= VM_SEQ_READ;
591 vma->vm_flags &= ~VM_RAND_READ;
Martin Brandenburg35390802015-09-30 13:11:54 -0400592
593 /* Use readonly mmap since we cannot support writable maps. */
594 return generic_file_readonly_mmap(file, vma);
Mike Marshall5db11c22015-07-17 10:38:12 -0400595}
596
597#define mapping_nrpages(idata) ((idata)->nrpages)
598
599/*
600 * Called to notify the module that there are no more references to
601 * this file (i.e. no processes have it open).
602 *
603 * \note Not called when each file is closed.
604 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500605static int orangefs_file_release(struct inode *inode, struct file *file)
Mike Marshall5db11c22015-07-17 10:38:12 -0400606{
607 gossip_debug(GOSSIP_FILE_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500608 "orangefs_file_release: called on %s\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400609 file->f_path.dentry->d_name.name);
610
Yi Liu8bb8aef2015-11-24 15:12:14 -0500611 orangefs_flush_inode(inode);
Mike Marshall5db11c22015-07-17 10:38:12 -0400612
613 /*
Mike Marshall54804942015-10-05 13:44:24 -0400614 * remove all associated inode pages from the page cache and mmap
615 * readahead cache (if any); this forces an expensive refresh of
616 * data for the next caller of mmap (or 'get_block' accesses)
Mike Marshall5db11c22015-07-17 10:38:12 -0400617 */
618 if (file->f_path.dentry->d_inode &&
619 file->f_path.dentry->d_inode->i_mapping &&
620 mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
621 truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
622 0);
623 return 0;
624}
625
626/*
627 * Push all data for a specific file onto permanent storage.
628 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500629static int orangefs_fsync(struct file *file,
Mike Marshall84d02152015-07-28 13:27:51 -0400630 loff_t start,
631 loff_t end,
632 int datasync)
Mike Marshall5db11c22015-07-17 10:38:12 -0400633{
634 int ret = -EINVAL;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500635 struct orangefs_inode_s *orangefs_inode =
636 ORANGEFS_I(file->f_path.dentry->d_inode);
637 struct orangefs_kernel_op_s *new_op = NULL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400638
639 /* required call */
640 filemap_write_and_wait_range(file->f_mapping, start, end);
641
Yi Liu8bb8aef2015-11-24 15:12:14 -0500642 new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
Mike Marshall5db11c22015-07-17 10:38:12 -0400643 if (!new_op)
644 return -ENOMEM;
Yi Liu8bb8aef2015-11-24 15:12:14 -0500645 new_op->upcall.req.fsync.refn = orangefs_inode->refn;
Mike Marshall5db11c22015-07-17 10:38:12 -0400646
647 ret = service_operation(new_op,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500648 "orangefs_fsync",
Mike Marshall5db11c22015-07-17 10:38:12 -0400649 get_interruptible_flag(file->f_path.dentry->d_inode));
650
651 gossip_debug(GOSSIP_FILE_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500652 "orangefs_fsync got return value of %d\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400653 ret);
654
655 op_release(new_op);
656
Yi Liu8bb8aef2015-11-24 15:12:14 -0500657 orangefs_flush_inode(file->f_path.dentry->d_inode);
Mike Marshall5db11c22015-07-17 10:38:12 -0400658 return ret;
659}
660
661/*
662 * Change the file pointer position for an instance of an open file.
663 *
664 * \note If .llseek is overriden, we must acquire lock as described in
665 * Documentation/filesystems/Locking.
666 *
667 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
668 * require much changes to the FS
669 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500670static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
Mike Marshall5db11c22015-07-17 10:38:12 -0400671{
672 int ret = -EINVAL;
673 struct inode *inode = file->f_path.dentry->d_inode;
674
675 if (!inode) {
Yi Liu8bb8aef2015-11-24 15:12:14 -0500676 gossip_err("orangefs_file_llseek: invalid inode (NULL)\n");
Mike Marshall5db11c22015-07-17 10:38:12 -0400677 return ret;
678 }
679
Yi Liu8bb8aef2015-11-24 15:12:14 -0500680 if (origin == ORANGEFS_SEEK_END) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400681 /*
682 * revalidate the inode's file size.
683 * NOTE: We are only interested in file size here,
684 * so we set mask accordingly.
685 */
Martin Brandenburg99109822016-01-28 10:19:40 -0500686 ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_SIZE, 0);
Mike Marshall5db11c22015-07-17 10:38:12 -0400687 if (ret) {
688 gossip_debug(GOSSIP_FILE_DEBUG,
689 "%s:%s:%d calling make bad inode\n",
690 __FILE__,
691 __func__,
692 __LINE__);
Yi Liu8bb8aef2015-11-24 15:12:14 -0500693 orangefs_make_bad_inode(inode);
Mike Marshall5db11c22015-07-17 10:38:12 -0400694 return ret;
695 }
696 }
697
698 gossip_debug(GOSSIP_FILE_DEBUG,
Yi Liu8bb8aef2015-11-24 15:12:14 -0500699 "orangefs_file_llseek: offset is %ld | origin is %d"
Mike Marshall54804942015-10-05 13:44:24 -0400700 " | inode size is %lu\n",
Mike Marshall5db11c22015-07-17 10:38:12 -0400701 (long)offset,
702 origin,
703 (unsigned long)file->f_path.dentry->d_inode->i_size);
704
705 return generic_file_llseek(file, offset, origin);
706}
707
708/*
709 * Support local locks (locks that only this kernel knows about)
710 * if Orangefs was mounted -o local_lock.
711 */
Yi Liu8bb8aef2015-11-24 15:12:14 -0500712static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
Mike Marshall5db11c22015-07-17 10:38:12 -0400713{
Mike Marshallf957ae22015-09-24 12:53:05 -0400714 int rc = -EINVAL;
Mike Marshall5db11c22015-07-17 10:38:12 -0400715
Yi Liu8bb8aef2015-11-24 15:12:14 -0500716 if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
Mike Marshall5db11c22015-07-17 10:38:12 -0400717 if (cmd == F_GETLK) {
718 rc = 0;
719 posix_test_lock(filp, fl);
720 } else {
721 rc = posix_lock_file(filp, fl, NULL);
722 }
723 }
724
725 return rc;
726}
727
/*
 * ORANGEFS implementation of VFS file operations.  Operations not
 * listed here fall back to the VFS defaults.
 */
const struct file_operations orangefs_file_operations = {
	.llseek = orangefs_file_llseek,
	.read_iter = orangefs_file_read_iter,
	.write_iter = orangefs_file_write_iter,
	.lock = orangefs_lock,
	.unlocked_ioctl = orangefs_ioctl,
	.mmap = orangefs_file_mmap,
	.open = generic_file_open,
	.release = orangefs_file_release,
	.fsync = orangefs_fsync,
};