blob: 74706f3527800a07f262b9e691c5c417949dadb5 [file] [log] [blame]
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/pagemap.h>
Steven Whitehouse9b124fb2006-01-30 11:55:32 +000016#include <linux/mpage.h>
Steven Whitehoused1665e42006-02-14 11:54:42 +000017#include <linux/fs.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000018#include <asm/semaphore.h>
19
20#include "gfs2.h"
21#include "bmap.h"
22#include "glock.h"
23#include "inode.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000024#include "log.h"
25#include "meta_io.h"
26#include "ops_address.h"
27#include "page.h"
28#include "quota.h"
29#include "trans.h"
Steven Whitehouse18ec7d52006-02-08 11:50:51 +000030#include "rgrp.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000031
32/**
Steven Whitehouse4ff14672006-01-30 09:39:10 +000033 * gfs2_get_block - Fills in a buffer head with details about a block
David Teiglandb3b94fa2006-01-16 16:50:04 +000034 * @inode: The inode
35 * @lblock: The block number to look up
36 * @bh_result: The buffer head to return the result in
37 * @create: Non-zero if we may add block to the file
38 *
39 * Returns: errno
40 */
41
Steven Whitehouse4ff14672006-01-30 09:39:10 +000042int gfs2_get_block(struct inode *inode, sector_t lblock,
43 struct buffer_head *bh_result, int create)
David Teiglandb3b94fa2006-01-16 16:50:04 +000044{
45 struct gfs2_inode *ip = get_v2ip(inode);
46 int new = create;
47 uint64_t dblock;
48 int error;
49
50 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
51 if (error)
52 return error;
53
54 if (!dblock)
55 return 0;
56
57 map_bh(bh_result, inode->i_sb, dblock);
58 if (new)
59 set_buffer_new(bh_result);
60
61 return 0;
62}
63
64/**
65 * get_block_noalloc - Fills in a buffer head with details about a block
66 * @inode: The inode
67 * @lblock: The block number to look up
68 * @bh_result: The buffer head to return the result in
69 * @create: Non-zero if we may add block to the file
70 *
71 * Returns: errno
72 */
73
74static int get_block_noalloc(struct inode *inode, sector_t lblock,
75 struct buffer_head *bh_result, int create)
76{
77 struct gfs2_inode *ip = get_v2ip(inode);
78 int new = 0;
79 uint64_t dblock;
80 int error;
81
82 error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
83 if (error)
84 return error;
85
86 if (dblock)
87 map_bh(bh_result, inode->i_sb, dblock);
88 else if (gfs2_assert_withdraw(ip->i_sbd, !create))
89 error = -EIO;
90
91 return error;
92}
93
94static int get_blocks(struct inode *inode, sector_t lblock,
95 unsigned long max_blocks, struct buffer_head *bh_result,
96 int create)
97{
98 struct gfs2_inode *ip = get_v2ip(inode);
99 int new = create;
100 uint64_t dblock;
101 uint32_t extlen;
102 int error;
103
104 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
105 if (error)
106 return error;
107
108 if (!dblock)
109 return 0;
110
111 map_bh(bh_result, inode->i_sb, dblock);
112 if (new)
113 set_buffer_new(bh_result);
114
115 if (extlen > max_blocks)
116 extlen = max_blocks;
117 bh_result->b_size = extlen << inode->i_blkbits;
118
119 return 0;
120}
121
122static int get_blocks_noalloc(struct inode *inode, sector_t lblock,
123 unsigned long max_blocks,
124 struct buffer_head *bh_result, int create)
125{
126 struct gfs2_inode *ip = get_v2ip(inode);
127 int new = 0;
128 uint64_t dblock;
129 uint32_t extlen;
130 int error;
131
132 error = gfs2_block_map(ip, lblock, &new, &dblock, &extlen);
133 if (error)
134 return error;
135
136 if (dblock) {
137 map_bh(bh_result, inode->i_sb, dblock);
138 if (extlen > max_blocks)
139 extlen = max_blocks;
140 bh_result->b_size = extlen << inode->i_blkbits;
141 } else if (gfs2_assert_withdraw(ip->i_sbd, !create))
142 error = -EIO;
143
144 return error;
145}
146
147/**
148 * gfs2_writepage - Write complete page
149 * @page: Page to write
150 *
151 * Returns: errno
152 *
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000153 * Some of this is copied from block_write_full_page() although we still
154 * call it to do most of the work.
David Teiglandb3b94fa2006-01-16 16:50:04 +0000155 */
156
157static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
158{
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000159 struct inode *inode = page->mapping->host;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000160 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
161 struct gfs2_sbd *sdp = ip->i_sbd;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000162 loff_t i_size = i_size_read(inode);
163 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
164 unsigned offset;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000165 int error;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000166 int done_trans = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000167
168 atomic_inc(&sdp->sd_ops_address);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000169 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
170 unlock_page(page);
171 return -EIO;
172 }
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000173 if (get_transaction)
174 goto out_ignore;
175
176 /* Is the page fully outside i_size? (truncate in progress) */
177 offset = i_size & (PAGE_CACHE_SIZE-1);
178 if (page->index >= end_index+1 || !offset) {
179 page->mapping->a_ops->invalidatepage(page, 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000180 unlock_page(page);
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000181 return 0; /* don't care */
182 }
183
184 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
185 error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
186 if (error)
187 goto out_ignore;
188 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
189 done_trans = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000190 }
191
192 error = block_write_full_page(page, get_block_noalloc, wbc);
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000193 if (done_trans)
194 gfs2_trans_end(sdp);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000195 gfs2_meta_cache_flush(ip);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000196 return error;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000197
198out_ignore:
199 redirty_page_for_writepage(wbc, page);
200 unlock_page(page);
201 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000202}
203
204/**
205 * stuffed_readpage - Fill in a Linux page with stuffed file data
206 * @ip: the inode
207 * @page: the page
208 *
209 * Returns: errno
210 */
211
212static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
213{
214 struct buffer_head *dibh;
215 void *kaddr;
216 int error;
217
218 error = gfs2_meta_inode_buffer(ip, &dibh);
219 if (error)
220 return error;
221
222 kaddr = kmap(page);
223 memcpy((char *)kaddr,
224 dibh->b_data + sizeof(struct gfs2_dinode),
225 ip->i_di.di_size);
226 memset((char *)kaddr + ip->i_di.di_size,
227 0,
228 PAGE_CACHE_SIZE - ip->i_di.di_size);
229 kunmap(page);
230
231 brelse(dibh);
232
233 SetPageUptodate(page);
234
235 return 0;
236}
237
238static int zero_readpage(struct page *page)
239{
240 void *kaddr;
241
242 kaddr = kmap(page);
243 memset(kaddr, 0, PAGE_CACHE_SIZE);
244 kunmap(page);
245
246 SetPageUptodate(page);
247 unlock_page(page);
248
249 return 0;
250}
251
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *	  reading an internal file.
 * @page: The page to read
 *
 * Takes a shared glock for the duration of the read, then dispatches to
 * the stuffed or mpage path depending on where the data lives.
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = get_v2ip(page->mapping->host);
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct gfs2_holder gh;
	int error;

	atomic_inc(&sdp->sd_ops_address);

	/* Shared glock; GL_ATIME lets the lock module update atime */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	error = gfs2_glock_nq_m_atime(1, &gh);
	if (error)
		goto out_unlock;

	if (gfs2_is_stuffed(ip)) {
		/* Stuffed data lives entirely in the dinode block, so only
		   page 0 can hold data; later pages are all zeros */
		if (!page->index) {
			error = stuffed_readpage(ip, page);
			unlock_page(page);
		} else
			error = zero_readpage(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	/* A withdrawn filesystem must not hand back possibly-stale data */
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
out:
	return error;
out_unlock:
	/* Lock acquisition failed: the caller expects the page unlocked */
	unlock_page(page);
	goto out;
}
295
296/**
297 * gfs2_prepare_write - Prepare to write a page to a file
298 * @file: The file to write to
299 * @page: The page which is to be prepared for writing
300 * @from: From (byte range within page)
301 * @to: To (byte range within page)
302 *
303 * Returns: errno
304 */
305
306static int gfs2_prepare_write(struct file *file, struct page *page,
307 unsigned from, unsigned to)
308{
309 struct gfs2_inode *ip = get_v2ip(page->mapping->host);
310 struct gfs2_sbd *sdp = ip->i_sbd;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000311 unsigned int data_blocks, ind_blocks, rblocks;
312 int alloc_required;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000313 int error = 0;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000314 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
315 loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
316 struct gfs2_alloc *al;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000317
318 atomic_inc(&sdp->sd_ops_address);
319
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000320 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
321 error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
322 if (error)
323 goto out_uninit;
324
325 gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);
326
327 error = gfs2_write_alloc_required(ip, pos, from - to, &alloc_required);
328 if (error)
329 goto out_unlock;
330
331
332 if (alloc_required) {
333 al = gfs2_alloc_get(ip);
334
335 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
336 if (error)
337 goto out_alloc_put;
338
339 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
340 if (error)
341 goto out_qunlock;
342
343 al->al_requested = data_blocks + ind_blocks;
344 error = gfs2_inplace_reserve(ip);
345 if (error)
346 goto out_qunlock;
347 }
348
349 rblocks = RES_DINODE + ind_blocks;
350 if (gfs2_is_jdata(ip))
351 rblocks += data_blocks ? data_blocks : 1;
352 if (ind_blocks || data_blocks)
353 rblocks += RES_STATFS + RES_QUOTA;
354
355 error = gfs2_trans_begin(sdp, rblocks, 0);
356 if (error)
357 goto out;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000358
359 if (gfs2_is_stuffed(ip)) {
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000360 if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
361 error = gfs2_unstuff_dinode(ip, gfs2_unstuffer_page, page);
362 if (error)
363 goto out;
364 } else if (!PageUptodate(page)) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000365 error = stuffed_readpage(ip, page);
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000366 goto out;
367 }
368 }
369
370 error = block_prepare_write(page, from, to, gfs2_get_block);
371
372out:
373 if (error) {
374 gfs2_trans_end(sdp);
375 if (alloc_required) {
376 gfs2_inplace_release(ip);
377out_qunlock:
378 gfs2_quota_unlock(ip);
379out_alloc_put:
380 gfs2_alloc_put(ip);
381 }
382out_unlock:
383 gfs2_glock_dq_m(1, &ip->i_gh);
384out_uninit:
385 gfs2_holder_uninit(&ip->i_gh);
386 }
David Teiglandb3b94fa2006-01-16 16:50:04 +0000387
388 return error;
389}
390
391/**
392 * gfs2_commit_write - Commit write to a file
393 * @file: The file to write to
394 * @page: The page containing the data
395 * @from: From (byte range within page)
396 * @to: To (byte range within page)
397 *
398 * Returns: errno
399 */
400
401static int gfs2_commit_write(struct file *file, struct page *page,
402 unsigned from, unsigned to)
403{
404 struct inode *inode = page->mapping->host;
405 struct gfs2_inode *ip = get_v2ip(inode);
406 struct gfs2_sbd *sdp = ip->i_sbd;
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000407 int error = -EOPNOTSUPP;
408 struct buffer_head *dibh;
409 struct gfs2_alloc *al = &ip->i_alloc;;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000410
411 atomic_inc(&sdp->sd_ops_address);
412
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000413
414 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
415 goto fail_nounlock;
416
417 error = gfs2_meta_inode_buffer(ip, &dibh);
418 if (error)
419 goto fail_endtrans;
420
421 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
422
David Teiglandb3b94fa2006-01-16 16:50:04 +0000423 if (gfs2_is_stuffed(ip)) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000424 uint64_t file_size;
425 void *kaddr;
426
427 file_size = ((uint64_t)page->index << PAGE_CACHE_SHIFT) + to;
428
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000429 kaddr = kmap_atomic(page, KM_USER0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000430 memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000431 (char *)kaddr + from, to - from);
432 kunmap_atomic(page, KM_USER0);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000433
434 SetPageUptodate(page);
435
436 if (inode->i_size < file_size)
437 i_size_write(inode, file_size);
438 } else {
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000439 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
Steven Whitehouse257f9b42006-01-31 10:00:25 +0000440 gfs2_page_add_databufs(ip, page, from, to);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000441 error = generic_commit_write(file, page, from, to);
442 if (error)
443 goto fail;
444 }
445
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000446 if (ip->i_di.di_size < inode->i_size)
447 ip->i_di.di_size = inode->i_size;
448
449 gfs2_dinode_out(&ip->i_di, dibh->b_data);
450 brelse(dibh);
451 gfs2_trans_end(sdp);
452 if (al->al_requested) {
453 gfs2_inplace_release(ip);
454 gfs2_quota_unlock(ip);
455 gfs2_alloc_put(ip);
456 }
457 gfs2_glock_dq_m(1, &ip->i_gh);
458 gfs2_holder_uninit(&ip->i_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000459 return 0;
460
Steven Whitehouse18ec7d52006-02-08 11:50:51 +0000461fail:
462 brelse(dibh);
463fail_endtrans:
464 gfs2_trans_end(sdp);
465 if (al->al_requested) {
466 gfs2_inplace_release(ip);
467 gfs2_quota_unlock(ip);
468 gfs2_alloc_put(ip);
469 }
470 gfs2_glock_dq_m(1, &ip->i_gh);
471 gfs2_holder_uninit(&ip->i_gh);
472fail_nounlock:
David Teiglandb3b94fa2006-01-16 16:50:04 +0000473 ClearPageUptodate(page);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000474 return error;
475}
476
477/**
478 * gfs2_bmap - Block map function
479 * @mapping: Address space info
480 * @lblock: The block to map
481 *
482 * Returns: The disk address for the block or 0 on hole or error
483 */
484
485static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
486{
487 struct gfs2_inode *ip = get_v2ip(mapping->host);
488 struct gfs2_holder i_gh;
489 sector_t dblock = 0;
490 int error;
491
492 atomic_inc(&ip->i_sbd->sd_ops_address);
493
494 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
495 if (error)
496 return 0;
497
498 if (!gfs2_is_stuffed(ip))
Steven Whitehouse4ff14672006-01-30 09:39:10 +0000499 dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000500
501 gfs2_glock_dq_uninit(&i_gh);
502
503 return dblock;
504}
505
506static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
507{
Steven Whitehouse64fb4eb2006-01-18 13:14:40 +0000508 struct gfs2_bufdata *bd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000509
510 gfs2_log_lock(sdp);
Steven Whitehouse64fb4eb2006-01-18 13:14:40 +0000511 bd = get_v2bd(bh);
512 if (bd) {
513 bd->bd_bh = NULL;
514 set_v2bd(bh, NULL);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000515 gfs2_log_unlock(sdp);
516 brelse(bh);
517 } else
518 gfs2_log_unlock(sdp);
519
520 lock_buffer(bh);
521 clear_buffer_dirty(bh);
522 bh->b_bdev = NULL;
523 clear_buffer_mapped(bh);
524 clear_buffer_req(bh);
525 clear_buffer_new(bh);
526 clear_buffer_delay(bh);
527 unlock_buffer(bh);
528}
529
530static int gfs2_invalidatepage(struct page *page, unsigned long offset)
531{
532 struct gfs2_sbd *sdp = get_v2sdp(page->mapping->host->i_sb);
533 struct buffer_head *head, *bh, *next;
534 unsigned int curr_off = 0;
535 int ret = 1;
536
537 BUG_ON(!PageLocked(page));
538 if (!page_has_buffers(page))
539 return 1;
540
541 bh = head = page_buffers(page);
542 do {
543 unsigned int next_off = curr_off + bh->b_size;
544 next = bh->b_this_page;
545
546 if (offset <= curr_off)
547 discard_buffer(sdp, bh);
548
549 curr_off = next_off;
550 bh = next;
551 } while (bh != head);
552
553 if (!offset)
554 ret = try_to_release_page(page, 0);
555
556 return ret;
557}
558
Steven Whitehoused1665e42006-02-14 11:54:42 +0000559static ssize_t gfs2_direct_IO_write(struct kiocb *iocb, const struct iovec *iov,
560 loff_t offset, unsigned long nr_segs)
561{
562 struct file *file = iocb->ki_filp;
563 struct inode *inode = file->f_mapping->host;
564 struct gfs2_inode *ip = get_v2ip(inode);
565 struct gfs2_holder gh;
566 int rv;
567
568 /*
569 * Shared lock, even though its write, since we do no allocation
570 * on this path. All we need change is atime.
571 */
572 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
573 rv = gfs2_glock_nq_m_atime(1, &gh);
574 if (rv)
575 goto out;
576
577 /*
578 * Should we return an error here? I can't see that O_DIRECT for
579 * a journaled file makes any sense. For now we'll silently fall
580 * back to buffered I/O, likewise we do the same for stuffed
581 * files since they are (a) small and (b) unaligned.
582 */
583 if (gfs2_is_jdata(ip))
584 goto out;
585
586 if (gfs2_is_stuffed(ip))
587 goto out;
588
589 rv = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
590 iov, offset, nr_segs, get_blocks_noalloc,
591 NULL, DIO_OWN_LOCKING);
592out:
593 gfs2_glock_dq_m(1, &gh);
594 gfs2_holder_uninit(&gh);
595
596 return rv;
597}
598
/**
 * gfs2_direct_IO - Dispatch direct I/O to the read or write path
 * @rw: READ or WRITE
 * @iocb: the I/O control block
 * @iov: the I/O vector(s)
 * @offset: starting file offset
 * @nr_segs: number of iovec segments
 *
 * This is called with a shared lock already held for the read path.
 * Currently, no locks are held when the write path is called.
 *
 * Returns: bytes transferred or errno
 */
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = get_v2ip(inode);
	struct gfs2_sbd *sdp = ip->i_sbd;

	atomic_inc(&sdp->sd_ops_address);

	/* Writes take their own shared glock in gfs2_direct_IO_write() */
	if (rw == WRITE)
		return gfs2_direct_IO_write(iocb, iov, offset, nr_segs);

	/* Read path: the caller must already hold the glock, and direct
	   I/O on a stuffed file is not supported */
	if (gfs2_assert_warn(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)) ||
	    gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
		return -EINVAL;

	return __blockdev_direct_IO(READ, iocb, inode, inode->i_sb->s_bdev, iov,
				    offset, nr_segs, get_blocks, NULL,
				    DIO_OWN_LOCKING);
}
627
/* Address space operations used for regular GFS2 files */
struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.sync_page = block_sync_page,
	.prepare_write = gfs2_prepare_write,
	.commit_write = gfs2_commit_write,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.direct_IO = gfs2_direct_IO,
};
638