/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

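/*
 * buffer_busy() is true while a buffer is dirty, locked or pinned;
 * buffer_in_io() is true while it is dirty or locked, i.e. while its
 * contents have not yet reached the in-place disk block.
 */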
#define buffer_busy(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
#define buffer_in_io(bh) \
((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))

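/*
 * Buffers in an aspace are mapped directly in getbuf(), so the VFS should
 * never need a get_block routine for them; aspace_get_block() therefore
 * only warns and fails if it is ever reached.
 */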
static int aspace_get_block(struct inode *inode, sector_t lblock,
			    struct buffer_head *bh_result, int create)
{
	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
	return -EOPNOTSUPP;
}

static int gfs2_aspace_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	return block_write_full_page(page, aspace_get_block, wbc);
}

/**
 * stuck_releasepage - We're stuck in gfs2_releasepage(). Print stuff out.
 * @bh: the buffer we're stuck on
 *
 */

static void stuck_releasepage(struct buffer_head *bh)
{
	struct inode *inode = bh->b_page->mapping->host;
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_glock *gl;

	fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
	fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
		(uint64_t)bh->b_blocknr, atomic_read(&bh->b_count));
	fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
	fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

	if (!bd)
		return;

	gl = bd->bd_gl;

	fs_warn(sdp, "gl = (%u, %llu)\n",
		gl->gl_name.ln_type, gl->gl_name.ln_number);

	fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
		(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
		(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

	if (gl->gl_ops == &gfs2_inode_glops) {
		struct gfs2_inode *ip = gl->gl_object;
		unsigned int x;

		if (!ip)
			return;

		fs_warn(sdp, "ip = %llu %llu\n",
			ip->i_num.no_formal_ino, ip->i_num.no_addr);
		fs_warn(sdp, "ip->i_count = %d, ip->i_vnode = %s\n",
			atomic_read(&ip->i_count),
			(ip->i_vnode) ? "!NULL" : "NULL");

		for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
			fs_warn(sdp, "ip->i_cache[%u] = %s\n",
				x, (ip->i_cache[x]) ? "!NULL" : "NULL");
	}
}

/**
 * gfs2_aspace_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if a buffer is still in use, otherwise the result of
 *          try_to_free_buffers()
 */

static int gfs2_aspace_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	unsigned long t;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		t = jiffies;

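		/*
		 * Someone still holds a reference on this buffer.  While
		 * gfs2_meta_inval() has raised i_writecount on the aspace
		 * an invalidation is in progress, so keep waiting (warning
		 * every gt_stall_secs); otherwise give up and leave the
		 * page alone.
		 */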
		while (atomic_read(&bh->b_count)) {
			if (atomic_read(&aspace->i_writecount)) {
				if (time_after_eq(jiffies, t +
				    gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
					stuck_releasepage(bh);
					t = jiffies;
				}

				yield();
				continue;
			}

			return 0;
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));

		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, list_empty(&bd->bd_le.le_list));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			bh->b_private = NULL;
		}

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}

static struct address_space_operations aspace_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_aspace_releasepage,
};

/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode. Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */

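/*
 * A rough usage sketch: callers pair this with gfs2_aspace_put(), and the
 * glock code keeps one such aspace per glock (gl->gl_aspace), e.g.
 *
 *	aspace = gfs2_aspace_get(sdp);
 *	if (!aspace)
 *		return -ENOMEM;	/* caller's choice of error, for example */
 *	...
 *	gfs2_aspace_put(aspace);
 */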
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
	struct inode *aspace;

	aspace = new_inode(sdp->sd_vfs);
	if (aspace) {
		mapping_set_gfp_mask(aspace->i_mapping, GFP_KERNEL);
		aspace->i_mapping->a_ops = &aspace_aops;
		aspace->i_size = ~0ULL;
		aspace->u.generic_ip = NULL;
		insert_inode_hash(aspace);
	}
	return aspace;
}

void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	iput(aspace);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

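/*
 * Walk the entry's first AIL list from the tail: buffers whose I/O has
 * completed are moved to the second list, and dirty buffers still awaiting
 * write-out are submitted with ll_rw_block() (dropping the log lock around
 * the wait and the write), after which the scan is restarted.
 */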
void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				if (!buffer_uptodate(bh))
					gfs2_io_error_bh(sdp, bh);
				list_move(&bd->bd_ail_st_list,
					  &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			retry = 1;
			break;
		}
	} while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep checking beyond a busy buffer
 *
 * Returns: true if the first AIL list for this entry is now empty
 */

int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}

/**
 * gfs2_ail2_empty_one - Remove all the buffers on an AIL entry's second list
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

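/*
 * A transaction is opened with enough reserved space for one revoke per
 * AIL block on this glock; each buffer is then unhooked from the AIL and
 * a revoke is added for its block before the log is flushed.
 */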
void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	uint64_t blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */

void gfs2_meta_inval(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct address_space *mapping = gl->gl_aspace->i_mapping;

	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

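	/*
	 * Raising i_writecount tells gfs2_aspace_releasepage() that an
	 * invalidation is in progress, so it waits for busy buffers
	 * instead of refusing to release the page.
	 */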
	atomic_inc(&aspace->i_writecount);
	truncate_inode_pages(mapping, 0);
	atomic_dec(&aspace->i_writecount);

	gfs2_assert_withdraw(sdp, !mapping->nrpages);
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 * @flags: DIO_START | DIO_WAIT
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	int error = 0;

	if (flags & DIO_START)
		filemap_fdatawrite(mapping);
	if (!error && (flags & DIO_WAIT))
		error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}

/**
 * getbuf - Get a buffer with a given address space
 * @sdp: the filesystem
 * @aspace: the address space
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */

static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
				  uint64_t blkno, int create)
{
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */
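	/*
	 * Worked example (an illustration only): with 4K pages and a 1K
	 * block size, shift = 12 - 10 = 2, so block 11 lands in page
	 * index 11 >> 2 = 2 as buffer 11 - (2 << 2) = 3 within that page.
	 */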

	if (create) {
		for (;;) {
			page = grab_cache_page(aspace->i_mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(aspace->i_mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}

static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, uint64_t blkno)
{
	struct buffer_head *bh;
	bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags passed to gfs2_meta_reread()
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, uint64_t blkno, int flags,
		   struct buffer_head **bhp)
{
	int error;

	*bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
	error = gfs2_meta_reread(gl->gl_sbd, *bhp, flags);
	if (error)
		brelse(*bhp);

	return error;
}

/**
 * gfs2_meta_reread - Reread a block from disk
 * @sdp: the filesystem
 * @bh: The block to read
 * @flags: DIO_FORCE to force a re-read, DIO_START to submit the read,
 *         DIO_WAIT to wait for it to complete
 *
 * Returns: errno
 */

int gfs2_meta_reread(struct gfs2_sbd *sdp, struct buffer_head *bh, int flags)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (flags & DIO_FORCE)
		clear_buffer_uptodate(bh);

	if ((flags & DIO_START) && !buffer_uptodate(bh))
		ll_rw_block(READ, 1, &bh);

	if (flags & DIO_WAIT) {
		wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			struct gfs2_trans *tr = current->journal_info;
			if (tr && tr->tr_touched)
				gfs2_io_error_bh(sdp, bh);
			return -EIO;
		}
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			return -EIO;
	}

	return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

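/*
 * Metadata buffers are logged through gfs2_buf_lops; (journaled) data
 * buffers use gfs2_databuf_lops and take an extra reference on the buffer
 * head.  For metadata the page lock is held around the attach, which keeps
 * it from racing with gfs2_aspace_releasepage() freeing b_private.
 */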
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
			 int meta)
{
	struct gfs2_bufdata *bd;

	if (meta)
		lock_page(bh->b_page);

	if (bh->b_private) {
		if (meta)
			unlock_page(bh->b_page);
		return;
	}

	bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
	memset(bd, 0, sizeof(struct gfs2_bufdata));

	bd->bd_bh = bh;
	bd->bd_gl = gl;

	INIT_LIST_HEAD(&bd->bd_list_tr);
	if (meta) {
		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
	} else {
		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
		get_bh(bh);
	}
	bh->b_private = bd;

	if (meta)
		unlock_page(bh->b_page);
}

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to be pinned
 *
 */

void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	wait_on_buffer(bh);

	/* If this buffer is in the AIL and it has already been written
	   to in-place disk block, remove it from the AIL. */

	gfs2_log_lock(sdp);
	if (bd->bd_ail && !buffer_in_io(bh))
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	gfs2_log_unlock(sdp);

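	/*
	 * While the buffer is pinned it must not be written back to its
	 * in-place location; clearing the dirty bit keeps the VM from
	 * doing so, and the log code writes the pinned copy to the
	 * journal instead.
	 */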
	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);

	get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer is placed on
 *
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

	if (!buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

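	/*
	 * Hand the buffer over to the AIL for this log flush.  If it was
	 * already on an earlier AIL, that list already holds a reference,
	 * so drop the one the pin took; otherwise the pin reference
	 * becomes the AIL reference and is accounted on the glock.
	 */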
	gfs2_log_lock(sdp);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	gfs2_log_unlock(sdp);
}

/**
 * gfs2_meta_wipe - make sure an inode's buffers are no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, uint64_t bstart, uint32_t blen)
{
	struct gfs2_sbd *sdp = ip->i_sbd;
	struct inode *aspace = ip->i_gl->gl_aspace;
	struct buffer_head *bh;

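	/*
	 * For each block in the run that still has a cached buffer: if it
	 * is pinned, pull it out of the log's buffer list and count it as
	 * removed in the current transaction; if it sits on an AIL, unhook
	 * it and add a revoke so the stale journal copy is not replayed;
	 * finally mark the buffer clean and not uptodate.
	 */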
	while (blen) {
		bh = getbuf(sdp, aspace, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			if (test_clear_buffer_pinned(bh)) {
				struct gfs2_trans *tr = current->journal_info;
				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);
				tr->tr_num_buf_rm++;
				brelse(bh);
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					uint64_t blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					brelse(bh);
					gfs2_log_unlock(sdp);
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */

void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
	struct buffer_head **bh_slot;
	unsigned int x;

	spin_lock(&ip->i_spin);

	for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
		bh_slot = &ip->i_cache[x];
		if (!*bh_slot)
			break;
		brelse(*bh_slot);
		*bh_slot = NULL;
	}

	spin_unlock(&ip->i_spin);
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

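/*
 * The cache is one buffer per metadata-tree height (ip->i_cache[], sized
 * GFS2_MAX_META_HEIGHT and protected by ip->i_spin).  A hit on the same
 * block number reuses the cached buffer; otherwise the buffer that was
 * read or created replaces whatever the slot previously held.
 */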
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, uint64_t num,
			      int new, struct buffer_head **bhp)
{
	struct buffer_head *bh, **bh_slot = ip->i_cache + height;
	int error;

	spin_lock(&ip->i_spin);
	bh = *bh_slot;
	if (bh) {
		if (bh->b_blocknr == num)
			get_bh(bh);
		else
			bh = NULL;
	}
	spin_unlock(&ip->i_spin);

	if (bh) {
		if (new)
			meta_prep_new(bh);
		else {
			error = gfs2_meta_reread(ip->i_sbd, bh,
						 DIO_START | DIO_WAIT);
			if (error) {
				brelse(bh);
				return error;
			}
		}
	} else {
		if (new)
			bh = gfs2_meta_new(ip->i_gl, num);
		else {
			error = gfs2_meta_read(ip->i_gl, num,
					       DIO_START | DIO_WAIT, &bh);
			if (error)
				return error;
		}

		spin_lock(&ip->i_spin);
		if (*bh_slot != bh) {
			brelse(*bh_slot);
			*bh_slot = bh;
			get_bh(bh);
		}
		spin_unlock(&ip->i_spin);
	}

	if (new) {
		if (gfs2_assert_warn(ip->i_sbd, height)) {
			brelse(bh);
			return -EIO;
		}
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));

	} else if (gfs2_metatype_check(ip->i_sbd, bh,
			(height) ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)) {
		brelse(bh);
		return -EIO;
	}

	*bhp = bh;

	return 0;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 */

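/*
 * The extent is clamped to the gt_max_readahead tunable, scaled to blocks
 * by the block-size shift.  The first buffer is read and kept, the
 * remaining reads are only started and their buffers released; the loop
 * bails out early once the first buffer has become uptodate.
 */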
void gfs2_meta_ra(struct gfs2_glock *gl, uint64_t dblock, uint32_t extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct buffer_head *first_bh, *bh;
	uint32_t max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;
	int error;

	if (!extlen || !max_ra)
		return;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = getbuf(sdp, aspace, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh)) {
		error = gfs2_meta_reread(sdp, first_bh, DIO_START);
		if (error)
			goto out;
	}

	dblock++;
	extlen--;

	while (extlen) {
		bh = getbuf(sdp, aspace, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
			error = gfs2_meta_reread(sdp, bh, DIO_START);
			brelse(bh);
			if (error)
				goto out;
		} else
			brelse(bh);

		dblock++;
		extlen--;

		if (buffer_uptodate(first_bh))
			break;
	}

out:
	brelse(first_bh);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
	gfs2_log_flush(sdp, NULL);
	for (;;) {
		gfs2_ail1_start(sdp, DIO_ALL);
		if (gfs2_ail1_empty(sdp, DIO_ALL))
			break;
		msleep(10);
	}
}