/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS file operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
#include <linux/fs.h>
#include <linux/pagemap.h>

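/*
 * Mark an op's I/O as complete and wake the device-file owner that is
 * waiting on io_completion_waitq so it can return the op (see the call
 * site in wait_for_direct_io()).
 */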
#define wake_up_daemon_for_return(op)			\
do {							\
	spin_lock(&op->lock);				\
	op->io_completed = 1;				\
	spin_unlock(&op->lock);				\
	wake_up_interruptible(&op->io_completion_waitq);\
} while (0)

/*
 * Copy to client-core's address space from the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may in turn
 * be kernel-space or user-space addresses) or pointers to struct page's.
 */
static int precopy_buffers(struct orangefs_bufmap *bufmap,
			   int buffer_index,
			   struct iov_iter *iter,
			   size_t total_size)
{
	int ret = 0;
	/*
	 * copy data from application/kernel by pulling it out
	 * of the iovec.
	 */
	if (total_size) {
		ret = orangefs_bufmap_copy_from_iovec(bufmap,
						      iter,
						      buffer_index,
						      total_size);
		if (ret < 0)
			gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
				   __func__,
				   (long)ret);
	}

	return ret;
}

/*
 * Copy from client-core's address space to the buffers specified
 * by the iovec, up to total_size bytes.
 * NOTE: the iovec can contain either addresses (which may in turn
 * be kernel-space or user-space addresses) or pointers to struct page's.
 */
static int postcopy_buffers(struct orangefs_bufmap *bufmap,
			    int buffer_index,
			    struct iov_iter *iter,
			    size_t total_size)
{
	int ret = 0;
	/*
	 * copy data to application/kernel by pushing it out to
	 * the iovec. NOTE: target buffers can be addresses or
	 * struct page pointers.
	 */
	if (total_size) {
		ret = orangefs_bufmap_copy_to_iovec(bufmap,
						    iter,
						    buffer_index,
						    total_size);
		if (ret < 0)
			gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
				   __func__,
				   (long)ret);
	}
	return ret;
}

/*
 * Post and wait for the I/O upcall to finish.
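 *
 * The data is staged through a shared-memory buffer slot: for a write it
 * is copied in (precopy_buffers) before the upcall is serviced; for a
 * read it is copied out (postcopy_buffers) afterwards.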
 */
static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
		loff_t *offset, struct iov_iter *iter,
		size_t total_size, loff_t readahead_size)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	struct orangefs_bufmap *bufmap = NULL;
	struct orangefs_kernel_op_s *new_op = NULL;
	int buffer_index = -1;
	ssize_t ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
	if (!new_op) {
		ret = -ENOMEM;
		goto out;
	}
	/* synchronous I/O */
	new_op->upcall.req.io.async_vfs_io = ORANGEFS_VFS_SYNC_IO;
	new_op->upcall.req.io.readahead_size = readahead_size;
	new_op->upcall.req.io.io_type = type;
	new_op->upcall.req.io.refn = orangefs_inode->refn;

populate_shared_memory:
	/* get a shared buffer index */
	ret = orangefs_bufmap_get(&bufmap, &buffer_index);
	if (ret < 0) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: orangefs_bufmap_get failure (%ld)\n",
			     __func__, (long)ret);
		goto out;
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): GET op %p -> buffer_index %d\n",
		     __func__,
		     handle,
		     new_op,
		     buffer_index);

	new_op->uses_shared_memory = 1;
	new_op->upcall.req.io.buf_index = buffer_index;
	new_op->upcall.req.io.count = total_size;
	new_op->upcall.req.io.offset = *offset;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): offset: %llu total_size: %zd\n",
		     __func__,
		     handle,
		     llu(*offset),
		     total_size);
	/*
	 * Stage 1: copy the buffers into client-core's address space
	 * precopy_buffers only pertains to writes.
	 */
	if (type == ORANGEFS_IO_WRITE) {
		ret = precopy_buffers(bufmap,
				      buffer_index,
				      iter,
				      total_size);
		if (ret < 0)
			goto out;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Calling post_io_request with tag (%llu)\n",
		     __func__,
		     handle,
		     llu(new_op->tag));

	/* Stage 2: Service the I/O operation */
	ret = service_operation(new_op,
				type == ORANGEFS_IO_WRITE ?
					"file_write" :
					"file_read",
				get_interruptible_flag(inode));

	/*
	 * If service_operation() returns -EAGAIN #and# the operation was
	 * purged from orangefs_request_list or htable_ops_in_progress, then
	 * we know that the client was restarted, causing the shared memory
	 * area to be wiped clean. To restart a write operation in this
	 * case, we must re-copy the data from the user's iovec to a NEW
	 * shared memory location. To restart a read operation, we must get
	 * a new shared memory location.
	 */
	if (ret == -EAGAIN && op_state_purged(new_op)) {
		orangefs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: going to repopulate_shared_memory.\n",
			     __func__);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		handle_io_error();
		/*
		 * don't write an error to syslog on signaled operation
		 * termination unless we've got debugging turned on, as
		 * this can happen regularly (i.e. ctrl-c)
		 */
		if (ret == -EINTR)
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: returning error %ld\n", __func__,
				     (long)ret);
		else
			gossip_err("%s: error in %s handle %pU, returning %zd\n",
				   __func__,
				   type == ORANGEFS_IO_READ ?
					   "read from" : "write to",
				   handle, ret);
		goto out;
	}

	/*
	 * Stage 3: Post copy buffers from client-core's address space
	 * postcopy_buffers only pertains to reads.
	 */
	if (type == ORANGEFS_IO_READ) {
		ret = postcopy_buffers(bufmap,
				       buffer_index,
				       iter,
				       new_op->downcall.resp.io.amt_complete);
		if (ret < 0) {
			/*
			 * put the error code in the downcall so that
			 * handle_io_error() preserves it properly
			 */
			new_op->downcall.status = ret;
			handle_io_error();
			goto out;
		}
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Amount written as returned by the sys-io call: %d\n",
		     __func__,
		     handle,
		     (int)new_op->downcall.resp.io.amt_complete);

	ret = new_op->downcall.resp.io.amt_complete;

	/*
	 * tell the device file owner waiting on I/O that this op has
	 * completed and it can return now. In this exact case, on
	 * wakeup the daemon will free the op, so we *cannot* touch it
	 * after this.
	 */
	wake_up_daemon_for_return(new_op);
	new_op = NULL;

out:
	if (buffer_index >= 0) {
		orangefs_bufmap_put(bufmap, buffer_index);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): PUT buffer_index %d\n",
			     __func__, handle, buffer_index);
		buffer_index = -1;
	}
	if (new_op) {
		op_release(new_op);
		new_op = NULL;
	}
	return ret;
}

/*
 * Common entry point for read/write/readv/writev.
 * Dispatches the request to either the direct I/O or buffered I/O path,
 * depending on the mount options and/or augmented/extended metadata
 * attached to the file.
 * Note: file extended attributes override any mount options.
 */
static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
		loff_t *offset, struct iov_iter *iter)
{
	struct inode *inode = file->f_mapping->host;
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	size_t count = iov_iter_count(iter);
	ssize_t total_count = 0;
	ssize_t ret = -EINVAL;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
		     __func__,
		     handle,
		     (int)count);

	if (type == ORANGEFS_IO_WRITE) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): proceeding with offset : %llu, "
			     "size %d\n",
			     __func__,
			     handle,
			     llu(*offset),
			     (int)count);
	}

	if (count == 0) {
		ret = 0;
		goto out;
	}

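	/*
	 * Transfer the request in chunks, each no larger than one
	 * shared-memory buffer slot (orangefs_bufmap_size_query()).
	 */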
	while (iov_iter_count(iter)) {
		size_t each_count = iov_iter_count(iter);
		size_t amt_complete;

		/* how much to transfer in this loop iteration */
		if (each_count > orangefs_bufmap_size_query())
			each_count = orangefs_bufmap_size_query();

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): size of each_count(%d)\n",
			     __func__,
			     handle,
			     (int)each_count);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): BEFORE wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		ret = wait_for_direct_io(type, inode, offset, iter,
					 each_count, 0);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): return from wait_for_io:%d\n",
			     __func__,
			     handle,
			     (int)ret);

		if (ret < 0)
			goto out;

		*offset += ret;
		total_count += ret;
		amt_complete = ret;

		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s(%pU): AFTER wait_for_io: offset is %d\n",
			     __func__,
			     handle,
			     (int)*offset);

		/*
		 * if we got a short I/O operation,
		 * fall out and return what we got so far
		 */
		if (amt_complete < each_count)
			break;
	} /* end while */

	if (total_count > 0)
		ret = total_count;
out:
	if (ret > 0) {
		if (type == ORANGEFS_IO_READ) {
			file_accessed(file);
		} else {
			SetMtimeFlag(orangefs_inode);
			inode->i_mtime = CURRENT_TIME;
			mark_inode_dirty_sync(inode);
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%d) returned.\n",
		     __func__,
		     handle,
		     (int)ret);

	return ret;
}

/*
 * Read data from a specified offset in a file (referenced by inode).
 * Data may be placed either in a user or kernel buffer.
 */
ssize_t orangefs_inode_read(struct inode *inode,
			    struct iov_iter *iter,
			    loff_t *offset,
			    loff_t readahead_size)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	size_t count = iov_iter_count(iter);
	size_t bufmap_size;
	ssize_t ret = -EINVAL;

	g_orangefs_stats.reads++;

	bufmap_size = orangefs_bufmap_size_query();
	if (count > bufmap_size) {
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: count is too large (%zd/%zd)!\n",
			     __func__, count, bufmap_size);
		return -EINVAL;
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU) %zd@%llu\n",
		     __func__,
		     &orangefs_inode->refn.khandle,
		     count,
		     llu(*offset));

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
				 count, readahead_size);
	if (ret > 0)
		*offset += ret;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Value(%zd) returned.\n",
		     __func__,
		     &orangefs_inode->refn.khandle,
		     ret);

	return ret;
}

static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t rc = 0;

	BUG_ON(iocb->private);

	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");

	g_orangefs_stats.reads++;

	rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
	iocb->ki_pos = pos;

	return rc;
}

static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	loff_t pos;
	ssize_t rc;

	BUG_ON(iocb->private);

	gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");

	mutex_lock(&file->f_mapping->host->i_mutex);

	/* Make sure generic_write_checks sees an up-to-date inode size. */
	if (file->f_flags & O_APPEND) {
		rc = orangefs_inode_getattr(file->f_mapping->host,
					    ORANGEFS_ATTR_SYS_SIZE);
		if (rc) {
			gossip_err("%s: orangefs_inode_getattr failed, rc:%zd:.\n",
				   __func__, rc);
			goto out;
		}
	}

	if (file->f_pos > i_size_read(file->f_mapping->host))
		orangefs_i_size_write(file->f_mapping->host, file->f_pos);

	rc = generic_write_checks(iocb, iter);

	if (rc <= 0) {
		gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	/*
	 * If we are appending, generic_write_checks will have updated
	 * pos to the end of the file, so we wait until now to set pos.
	 */
	pos = iocb->ki_pos;

	rc = do_readv_writev(ORANGEFS_IO_WRITE,
			     file,
			     &pos,
			     iter);
	if (rc < 0) {
		gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
			   __func__, rc);
		goto out;
	}

	iocb->ki_pos = pos;
	g_orangefs_stats.writes++;

out:
	mutex_unlock(&file->f_mapping->host->i_mutex);
	return rc;
}

/*
 * Perform a miscellaneous operation on a file.
 */
static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = -ENOTTY;
	__u64 val = 0;
	unsigned long uval;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_ioctl: called with cmd %d\n",
		     cmd);

	/*
	 * we understand some general ioctls on files, such as the immutable
	 * and append flags
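	 * (these bits are round-tripped through the "user.pvfs2.meta_hint"
	 * extended attribute by the get/setxattr calls below)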
	 */
	if (cmd == FS_IOC_GETFLAGS) {
		val = 0;
		ret = orangefs_inode_getxattr(file_inode(file),
					      ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
					      "user.pvfs2.meta_hint",
					      &val, sizeof(val));
		if (ret < 0 && ret != -ENODATA)
			return ret;
		else if (ret == -ENODATA)
			val = 0;
		uval = val;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
			     (unsigned long long)uval);
		return put_user(uval, (int __user *)arg);
	} else if (cmd == FS_IOC_SETFLAGS) {
		ret = 0;
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		/*
		 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
		 * is turned on for a file. The user is not allowed to turn
		 * on this bit, but the bit is present if the user first gets
		 * the flags and then updates the flags with some new
		 * settings. So, we ignore it in the following edit. bligon.
		 */
		if ((uval & ~ORANGEFS_MIRROR_FL) &
		    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
			gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
			return -EINVAL;
		}
		val = uval;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
			     (unsigned long long)val);
		ret = orangefs_inode_setxattr(file_inode(file),
					      ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
					      "user.pvfs2.meta_hint",
					      &val, sizeof(val), 0);
	}

	return ret;
}

/*
 * Memory map a region of a file.
 */
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_mmap: called on %s\n",
		     (file ?
		      (char *)file->f_path.dentry->d_name.name :
		      (char *)"Unknown"));

	/* set the sequential readahead hint */
	vma->vm_flags |= VM_SEQ_READ;
	vma->vm_flags &= ~VM_RAND_READ;

	/* Use readonly mmap since we cannot support writable maps. */
	return generic_file_readonly_mmap(file, vma);
}

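/* number of pages the page cache currently holds for this mapping */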
#define mapping_nrpages(idata) ((idata)->nrpages)

/*
 * Called to notify the module that there are no more references to
 * this file (i.e. no processes have it open).
 *
 * \note Not called when each file is closed.
 */
static int orangefs_file_release(struct inode *inode, struct file *file)
{
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_release: called on %s\n",
		     file->f_path.dentry->d_name.name);

	orangefs_flush_inode(inode);

	/*
	 * remove all associated inode pages from the page cache and mmap
	 * readahead cache (if any); this forces an expensive refresh of
	 * data for the next caller of mmap (or 'get_block' accesses)
	 */
	if (file->f_path.dentry->d_inode &&
	    file->f_path.dentry->d_inode->i_mapping &&
	    mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
		truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
				     0);
	return 0;
}

/*
 * Push all data for a specific file onto permanent storage.
 */
static int orangefs_fsync(struct file *file,
			  loff_t start,
			  loff_t end,
			  int datasync)
{
	int ret = -EINVAL;
	struct orangefs_inode_s *orangefs_inode =
		ORANGEFS_I(file->f_path.dentry->d_inode);
	struct orangefs_kernel_op_s *new_op = NULL;

	/* required call */
	filemap_write_and_wait_range(file->f_mapping, start, end);

	new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
	if (!new_op)
		return -ENOMEM;
	new_op->upcall.req.fsync.refn = orangefs_inode->refn;

	ret = service_operation(new_op,
				"orangefs_fsync",
				get_interruptible_flag(file->f_path.dentry->d_inode));

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_fsync got return value of %d\n",
		     ret);

	op_release(new_op);

	orangefs_flush_inode(file->f_path.dentry->d_inode);
	return ret;
}

/*
 * Change the file pointer position for an instance of an open file.
 *
 * \note If .llseek is overridden, we must acquire lock as described in
 *       Documentation/filesystems/Locking.
 *
 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
 * require many changes to the FS.
 */
static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
{
	int ret = -EINVAL;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (!inode) {
		gossip_err("orangefs_file_llseek: invalid inode (NULL)\n");
		return ret;
	}

	if (origin == ORANGEFS_SEEK_END) {
		/*
		 * revalidate the inode's file size.
		 * NOTE: We are only interested in file size here,
		 * so we set mask accordingly.
		 */
		ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_SIZE);
		if (ret) {
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s:%s:%d calling make bad inode\n",
				     __FILE__,
				     __func__,
				     __LINE__);
			orangefs_make_bad_inode(inode);
			return ret;
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_llseek: offset is %ld | origin is %d"
		     " | inode size is %lu\n",
		     (long)offset,
		     origin,
		     (unsigned long)file->f_path.dentry->d_inode->i_size);

	return generic_file_llseek(file, offset, origin);
}

/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int rc = -EINVAL;

	if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
		if (cmd == F_GETLK) {
			rc = 0;
			posix_test_lock(filp, fl);
		} else {
			rc = posix_lock_file(filp, fl, NULL);
		}
	}

	return rc;
}

/** ORANGEFS implementation of VFS file operations */
const struct file_operations orangefs_file_operations = {
	.llseek		= orangefs_file_llseek,
	.read_iter	= orangefs_file_read_iter,
	.write_iter	= orangefs_file_write_iter,
	.lock		= orangefs_lock,
	.unlocked_ioctl	= orangefs_ioctl,
	.mmap		= orangefs_file_mmap,
	.open		= generic_file_open,
	.release	= orangefs_file_release,
	.fsync		= orangefs_fsync,
};