/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"

#define PULL 1

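/*
 * Transactions and log flushes exclude each other.  lock_for_trans()
 * waits for any pending flush to finish before bumping the count of
 * active transactions; gfs2_lock_for_flush() announces a flush and then
 * waits for the active transactions to drain.
 */
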
static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
                         atomic_t *a)
{
        wait_event(*wq, !atomic_read(a));
}

static void lock_for_trans(struct gfs2_sbd *sdp)
{
        do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
        atomic_inc(&sdp->sd_log_trans_count);
}

static void unlock_from_trans(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_trans_count));
        if (atomic_dec_and_test(&sdp->sd_log_trans_count))
                wake_up(&sdp->sd_log_flush_wq);
}

static void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
{
        atomic_inc(&sdp->sd_log_flush_count);
        do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
}

static void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
        if (atomic_dec_and_test(&sdp->sd_log_flush_count))
                wake_up(&sdp->sd_log_trans_wq);
}

/**
 * gfs2_struct2blk - compute the number of log blocks needed for some structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) /
                ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_RU(nstruct - first, second);
        }

        return blks;
}
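
/*
 * Worked example for gfs2_struct2blk(), with purely illustrative numbers:
 * if a block holds 500 structures after the descriptor header and 508
 * after a continuation (meta) header, then 1200 structures need
 * 1 + DIV_RU(1200 - 500, 508) = 3 blocks.  The real per-block counts
 * depend on sb_bsize and the on-disk header sizes.
 */

/*
 * The AIL (active items list) code below tracks journaled buffers that
 * still have to be written back in place.  gfs2_ail1_start() kicks off
 * that writeback, gfs2_ail1_empty() moves fully written entries from
 * sd_ail1_list to sd_ail2_list, and ail2_empty() frees ail2 entries once
 * the log tail has moved past the journal segment they describe.
 */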

void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
        struct list_head *head = &sdp->sd_ail1_list;
        uint64_t sync_gen;
        struct list_head *first, *tmp;
        struct gfs2_ail *first_ai, *ai;

        gfs2_log_lock(sdp);
        if (list_empty(head)) {
                gfs2_log_unlock(sdp);
                return;
        }
        sync_gen = sdp->sd_ail_sync_gen++;

        first = head->prev;
        first_ai = list_entry(first, struct gfs2_ail, ai_list);
        first_ai->ai_sync_gen = sync_gen;
        gfs2_ail1_start_one(sdp, first_ai);

        if (flags & DIO_ALL)
                first = NULL;

        for (;;) {
                if (first &&
                    (head->prev != first ||
                     gfs2_ail1_empty_one(sdp, first_ai, 0)))
                        break;

                for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
                        ai = list_entry(tmp, struct gfs2_ail, ai_list);
                        if (ai->ai_sync_gen >= sync_gen)
                                continue;
                        ai->ai_sync_gen = sync_gen;
                        gfs2_ail1_start_one(sdp, ai);
                        break;
                }

                if (tmp == head)
                        break;
        }

        gfs2_log_unlock(sdp);
}

int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
        struct gfs2_ail *ai, *s;
        int ret;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                if (gfs2_ail1_empty_one(sdp, ai, flags))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else if (!(flags & DIO_ALL))
                        break;
        }

        ret = list_empty(&sdp->sd_ail1_list);

        gfs2_log_unlock(sdp);

        return ret;
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
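                /*
                 * Remove the entry if its first block lies in the range the
                 * tail just moved across, i.e. [old_tail, new_tail).  If the
                 * range wraps past the end of the journal it is the union of
                 * [old_tail, jd_blocks) and [0, new_tail), hence "a || b";
                 * otherwise it is the intersection, hence "a && b".
                 */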
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        LIST_HEAD(list);
        unsigned int try = 0;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;

        for (;;) {
                gfs2_log_lock(sdp);
                if (list_empty(&list)) {
                        list_add_tail(&list, &sdp->sd_log_blks_list);
                        while (sdp->sd_log_blks_list.next != &list) {
                                DECLARE_WAITQUEUE(__wait_chan, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&sdp->sd_log_blks_wait,
                                               &__wait_chan);
                                gfs2_log_unlock(sdp);
                                schedule();
                                gfs2_log_lock(sdp);
                                remove_wait_queue(&sdp->sd_log_blks_wait,
                                                  &__wait_chan);
                                set_current_state(TASK_RUNNING);
                        }
                }
                /* Never give away the last block so we can
                   always pull the tail if we need to. */
                if (sdp->sd_log_blks_free > blks) {
                        sdp->sd_log_blks_free -= blks;
                        list_del(&list);
                        gfs2_log_unlock(sdp);
                        wake_up(&sdp->sd_log_blks_wait);
                        break;
                }

                gfs2_log_unlock(sdp);
                gfs2_ail1_empty(sdp, 0);
                gfs2_log_flush(sdp);

                if (try++)
                        gfs2_ail1_start(sdp, 0);
        }
        lock_for_trans(sdp);

        return 0;
}
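
/*
 * Sketch of the reservation life cycle (simplified; the real callers are
 * in the transaction code, e.g. gfs2_trans_begin()/gfs2_trans_end()):
 *
 *	error = gfs2_log_reserve(sdp, blks);
 *	if (error)
 *		return error;
 *	... queue buffers and revokes for the transaction ...
 *	gfs2_log_commit(sdp, tr);	(or gfs2_log_release(sdp, blks)
 *					 if nothing was dirtied)
 */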

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        unlock_from_trans(sdp);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += blks;
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);
}

static uint64_t log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
        int new = 0;
        uint64_t dbn;
        int error;

        error = gfs2_block_map(get_v2ip(sdp->sd_jdesc->jd_inode),
                               lbn, &new, &dbn, NULL);
        gfs2_assert_withdraw(sdp, !error && dbn);

        return dbn;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp,
                                        unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
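
/*
 * Example of the wraparound arithmetic in log_distance(), with an
 * illustrative journal of 1000 blocks: newer = 3, older = 997 gives
 * dist = 3 - 997 = -994, which wraps to -994 + 1000 = 6 blocks.
 */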

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        gfs2_log_lock(sdp);

        if (list_empty(&sdp->sd_ail1_list))
                tail = sdp->sd_log_head;
        else {
                ai = list_entry(sdp->sd_ail1_list.prev,
                                struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        gfs2_log_unlock(sdp);

        return tail;
}

static inline void log_incr_head(struct gfs2_sbd *sdp)
{
        if (sdp->sd_log_flush_head == sdp->sd_log_tail)
                gfs2_assert_withdraw(sdp,
                                     sdp->sd_log_flush_head == sdp->sd_log_head);

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);

        bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        log_incr_head(sdp);

        return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head holding the metadata to be written to the log
 *
 * Returns: the fake buffer_head that was added to the log flush list
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                      struct buffer_head *real)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);
        lb->lb_real = real;

        bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
        set_bh_page(bh, real->b_page, bh_offset(real));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;

        log_incr_head(sdp);

        return bh;
}
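
/*
 * The fake buffer_head above points at the same page and offset as the
 * real metadata buffer (set_bh_page()), so the block can be written into
 * the journal at a different disk address without copying the data.
 */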

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        gfs2_log_lock(sdp);
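        /*
         * Moving the tail frees "dist" journal blocks; if this pull was
         * forced by writing an extra log header (PULL), that header used
         * one of them, so give back one block less.
         */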
        sdp->sd_log_blks_free += dist - ((pull) ? 1 : 0);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);

        sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a log header into the journal
 * @sdp: The GFS2 superblock
 * @flags: log header flags (for example GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero if this header is written only to pull the log tail
 *
 * The header is written synchronously, so the new tail it records is safely
 * on disk before any blocks behind that tail are reused.
 */

static void log_write_header(struct gfs2_sbd *sdp, uint32_t flags, int pull)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;
        struct gfs2_log_header *lh;
        unsigned int tail;
        uint32_t hash;

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        gfs2_ail1_empty(sdp, 0);
        tail = current_tail(sdp);

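        /* All log header fields are stored big-endian on disk. */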
        lh = (struct gfs2_log_header *)bh->b_data;
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be16(GFS2_METATYPE_LH);
        lh->lh_header.mh_format = cpu_to_be16(GFS2_FORMAT_LH);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        set_buffer_dirty(bh);
        if (sync_dirty_buffer(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail, pull);
        else
                gfs2_assert_withdraw(sdp, !pull);

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_log_flush_list;
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;
        unsigned int d;

        d = log_distance(sdp, sdp->sd_log_flush_head, sdp->sd_log_head);

        gfs2_assert_withdraw(sdp, d + 1 == sdp->sd_log_blks_reserved);

        while (!list_empty(head)) {
                lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
                list_del(&lb->lb_list);
                bh = lb->lb_bh;

                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                if (lb->lb_real) {
                        while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
                                schedule();
                        free_buffer_head(bh);
                } else
                        brelse(bh);
                kfree(lb);
        }

        log_write_header(sdp, 0, 0);
}

/**
 * gfs2_log_flush_i - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);
        gfs2_lock_for_flush(sdp);

        if (gl) {
                gfs2_log_lock(sdp);
                if (list_empty(&gl->gl_le.le_list)) {
                        gfs2_log_unlock(sdp);
                        gfs2_unlock_from_flush(sdp);
                        kfree(ai);
                        return;
                }
                gfs2_log_unlock(sdp);
        }

        mutex_lock(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

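        /*
         * Each registered log operation writes its descriptor and data
         * blocks at the flush head; log_flush_commit() then waits for them
         * and commits them with a log header.  If nothing was written, a
         * header may still be emitted just to pull the tail.
         */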
        lops_before_commit(sdp);
        if (!list_empty(&sdp->sd_log_flush_list))
                log_flush_commit(sdp);
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
                log_write_header(sdp, 0, PULL);
        lops_after_commit(sdp, ai);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;

        sdp->sd_log_blks_reserved =
                sdp->sd_log_commited_buf =
                sdp->sd_log_commited_revoke = 0;

        gfs2_log_lock(sdp);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        gfs2_log_unlock(sdp);

        mutex_unlock(&sdp->sd_log_flush_lock);
        sdp->sd_vfs->s_dirt = 0;
        gfs2_unlock_from_flush(sdp);

        kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved = 1;
        unsigned int old;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

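        /*
         * Estimate the blocks needed so far: a header, the committed
         * buffers themselves, plus roughly one descriptor block per 503
         * buffers (503 appears to correspond to the number of 8-byte block
         * pointers that fit in a descriptor block at a 4KB block size),
         * and the revoke descriptor blocks.
         */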
        if (sdp->sd_log_commited_buf)
                reserved += 1 + sdp->sd_log_commited_buf +
                        sdp->sd_log_commited_buf/503;
        if (sdp->sd_log_commited_revoke)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                            sizeof(uint64_t));

        old = sdp->sd_log_blks_free;
        sdp->sd_log_blks_free += tr->tr_reserved -
                (reserved - sdp->sd_log_blks_reserved);

        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the incore log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * The commit is in-core only; the log is flushed to disk later, or here
 * if the incore log has grown past the incore_log_blocks tunable.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        lops_incore_commit(sdp, tr);

        sdp->sd_vfs->s_dirt = 1;
        unlock_from_trans(sdp);

        kfree(tr);

        gfs2_log_lock(sdp);
        if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
                gfs2_log_unlock(sdp);
                gfs2_log_flush(sdp);
        } else
                gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !atomic_read(&sdp->sd_log_trans_count));
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free ==
                             sdp->sd_jdesc->jd_blocks);
        gfs2_assert_withdraw(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;
        sdp->sd_log_tail = sdp->sd_log_head;

        mutex_unlock(&sdp->sd_log_flush_lock);
}