/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <asm/semaphore.h>

#include "gfs2.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"

#define PULL 1

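/*
 * Transaction/flush exclusion (summary of the helpers below): a transaction
 * builder calls lock_for_trans(), which waits until no flush is counted in
 * sd_log_flush_count and then bumps sd_log_trans_count; the flush path calls
 * gfs2_lock_for_flush(), which bumps sd_log_flush_count and then waits until
 * every outstanding transaction has called unlock_from_trans().
 */
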
static void do_lock_wait(struct gfs2_sbd *sdp, wait_queue_head_t *wq,
                         atomic_t *a)
{
        wait_event(*wq, !atomic_read(a));
}

static void lock_for_trans(struct gfs2_sbd *sdp)
{
        do_lock_wait(sdp, &sdp->sd_log_trans_wq, &sdp->sd_log_flush_count);
        atomic_inc(&sdp->sd_log_trans_count);
}

static void unlock_from_trans(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_trans_count));
        if (atomic_dec_and_test(&sdp->sd_log_trans_count))
                wake_up(&sdp->sd_log_flush_wq);
}

static void gfs2_lock_for_flush(struct gfs2_sbd *sdp)
{
        atomic_inc(&sdp->sd_log_flush_count);
        do_lock_wait(sdp, &sdp->sd_log_flush_wq, &sdp->sd_log_trans_count);
}

static void gfs2_unlock_from_flush(struct gfs2_sbd *sdp)
{
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_flush_count));
        if (atomic_dec_and_test(&sdp->sd_log_flush_count))
                wake_up(&sdp->sd_log_trans_wq);
}

/**
 * gfs2_struct2blk - compute the number of log blocks needed for structures
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_RU(nstruct - first, second);
        }

        return blks;
}
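
/*
 * Worked example (illustrative numbers only): assuming a 4096-byte block,
 * a 72-byte struct gfs2_log_descriptor, a 24-byte struct gfs2_meta_header
 * and 8-byte structures -- these sizes are assumptions for illustration,
 * not taken from the on-disk headers -- the first descriptor block holds
 * (4096 - 72) / 8 = 503 entries, each continuation block holds
 * (4096 - 24) / 8 = 509, so 1000 structures need
 * 1 + DIV_RU(1000 - 503, 509) = 2 blocks.
 */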

void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
        struct list_head *head = &sdp->sd_ail1_list;
        uint64_t sync_gen;
        struct list_head *first, *tmp;
        struct gfs2_ail *first_ai, *ai;

        gfs2_log_lock(sdp);
        if (list_empty(head)) {
                gfs2_log_unlock(sdp);
                return;
        }
        sync_gen = sdp->sd_ail_sync_gen++;

        first = head->prev;
        first_ai = list_entry(first, struct gfs2_ail, ai_list);
        first_ai->ai_sync_gen = sync_gen;
        gfs2_ail1_start_one(sdp, first_ai);

        if (flags & DIO_ALL)
                first = NULL;

        for (;;) {
                if (first &&
                    (head->prev != first ||
                     gfs2_ail1_empty_one(sdp, first_ai, 0)))
                        break;

                for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
                        ai = list_entry(tmp, struct gfs2_ail, ai_list);
                        if (ai->ai_sync_gen >= sync_gen)
                                continue;
                        ai->ai_sync_gen = sync_gen;
                        gfs2_ail1_start_one(sdp, ai);
                        break;
                }

                if (tmp == head)
                        break;
        }

        gfs2_log_unlock(sdp);
}

int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
        struct gfs2_ail *ai, *s;
        int ret;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                if (gfs2_ail1_empty_one(sdp, ai, flags))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else if (!(flags & DIO_ALL))
                        break;
        }

        ret = list_empty(&sdp->sd_ail1_list);

        gfs2_log_unlock(sdp);

        return ret;
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                /* Remove entries whose first block lies in the journal
                   range the tail is moving over, i.e. [old_tail, new_tail);
                   when that range wraps past the end of the journal, it is
                   the union of the two pieces rather than the overlap. */
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        LIST_HEAD(list);
        unsigned int try = 0;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;

        for (;;) {
                gfs2_log_lock(sdp);
                if (list_empty(&list)) {
                        list_add_tail(&list, &sdp->sd_log_blks_list);
                        while (sdp->sd_log_blks_list.next != &list) {
                                DECLARE_WAITQUEUE(__wait_chan, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&sdp->sd_log_blks_wait,
                                               &__wait_chan);
                                gfs2_log_unlock(sdp);
                                schedule();
                                gfs2_log_lock(sdp);
                                remove_wait_queue(&sdp->sd_log_blks_wait,
                                                  &__wait_chan);
                                set_current_state(TASK_RUNNING);
                        }
                }
                /* Never give away the last block so we can
                   always pull the tail if we need to. */
                if (sdp->sd_log_blks_free > blks) {
                        sdp->sd_log_blks_free -= blks;
                        list_del(&list);
                        gfs2_log_unlock(sdp);
                        wake_up(&sdp->sd_log_blks_wait);
                        break;
                }

                gfs2_log_unlock(sdp);
                gfs2_ail1_empty(sdp, 0);
                gfs2_log_flush(sdp);

                if (try++)
                        gfs2_ail1_start(sdp, 0);
        }
        lock_for_trans(sdp);

        return 0;
}

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{
        unlock_from_trans(sdp);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += blks;
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);
}
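
/*
 * Note on pairing (as implemented in this file): gfs2_log_reserve() returns
 * with the transaction side of the trans/flush exclusion held via
 * lock_for_trans().  That hold is dropped either by gfs2_log_commit() or,
 * if the reserved blocks are given back unused, by gfs2_log_release();
 * which of the two a caller uses on a given path is up to the caller and
 * is not dictated here.
 */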

static uint64_t log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
        int new = 0;
        uint64_t dbn;
        int error;

        error = gfs2_block_map(get_v2ip(sdp->sd_jdesc->jd_inode), lbn,
                               &new, &dbn, NULL);
        gfs2_assert_withdraw(sdp, !error && dbn);

        return dbn;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 * Compute the distance (in the journal direction) between two
 * blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp,
                                        unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
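
/*
 * Example of the wraparound handling above (illustrative journal size):
 * with jd_blocks = 8192, newer = 10 and older = 8000, the raw difference
 * is 10 - 8000 = -7990, so the distance becomes -7990 + 8192 = 202 blocks,
 * i.e. the newer block is 202 blocks ahead of the older one across the
 * wrap point.
 */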

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        gfs2_log_lock(sdp);

        if (list_empty(&sdp->sd_ail1_list))
                tail = sdp->sd_log_head;
        else {
                ai = list_entry(sdp->sd_ail1_list.prev,
                                struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        gfs2_log_unlock(sdp);

        return tail;
}

static inline void log_incr_head(struct gfs2_sbd *sdp)
{
        if (sdp->sd_log_flush_head == sdp->sd_log_tail)
                gfs2_assert_withdraw(sdp,
                                     sdp->sd_log_flush_head == sdp->sd_log_head);

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);

        bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        log_incr_head(sdp);

        return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head whose page and data the fake buffer should point to
 *
 * Returns: the fake buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                      struct buffer_head *real)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);
        lb->lb_real = real;

        bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
        set_bh_page(bh, real->b_page, bh_offset(real));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;

        log_incr_head(sdp);

        return bh;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail,
                          int pull)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += dist - ((pull) ? 1 : 0);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);

        sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT) to store on disk
 * @pull: set when the header is written only to pull the tail (see PULL)
 *
 * Writes the header synchronously and, if the tail has moved, updates it.
 */

static void log_write_header(struct gfs2_sbd *sdp, uint32_t flags, int pull)
{
        uint64_t blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;
        struct gfs2_log_header *lh;
        unsigned int tail;
        uint32_t hash;

        atomic_inc(&sdp->sd_log_flush_ondisk);

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        gfs2_ail1_empty(sdp, 0);
        tail = current_tail(sdp);

        /* On-disk log header fields are big-endian, so convert from CPU
           byte order when filling them in. */
        lh = (struct gfs2_log_header *)bh->b_data;
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be16(GFS2_METATYPE_LH);
        lh->lh_header.mh_format = cpu_to_be16(GFS2_FORMAT_LH);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        set_buffer_dirty(bh);
        if (sync_dirty_buffer(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail, pull);
        else
                gfs2_assert_withdraw(sdp, !pull);

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        log_incr_head(sdp);
}

static void log_flush_commit(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_log_flush_list;
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;
        unsigned int d;

        d = log_distance(sdp, sdp->sd_log_flush_head, sdp->sd_log_head);

        gfs2_assert_withdraw(sdp, d + 1 == sdp->sd_log_blks_reserved);

        while (!list_empty(head)) {
                lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
                list_del(&lb->lb_list);
                bh = lb->lb_bh;

                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                if (lb->lb_real) {
                        while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
                                schedule();
                        free_buffer_head(bh);
                } else
                        brelse(bh);
                kfree(lb);
        }

        log_write_header(sdp, 0, 0);
}

/**
 * gfs2_log_flush_i - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush_i(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        atomic_inc(&sdp->sd_log_flush_incore);

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);
        gfs2_lock_for_flush(sdp);

        if (gl) {
                gfs2_log_lock(sdp);
                if (list_empty(&gl->gl_le.le_list)) {
                        gfs2_log_unlock(sdp);
                        gfs2_unlock_from_flush(sdp);
                        kfree(ai);
                        return;
                }
                gfs2_log_unlock(sdp);
        }

        mutex_lock(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

        lops_before_commit(sdp);
        if (!list_empty(&sdp->sd_log_flush_list))
                log_flush_commit(sdp);
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
                log_write_header(sdp, 0, PULL);
        lops_after_commit(sdp, ai);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;

        sdp->sd_log_blks_reserved =
                sdp->sd_log_commited_buf =
                sdp->sd_log_commited_revoke = 0;

        gfs2_log_lock(sdp);
        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        gfs2_log_unlock(sdp);

        mutex_unlock(&sdp->sd_log_flush_lock);
        sdp->sd_vfs->s_dirt = 0;
        gfs2_unlock_from_flush(sdp);

        kfree(ai);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved = 1;
        unsigned int old;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

        if (sdp->sd_log_commited_buf)
                reserved += 1 + sdp->sd_log_commited_buf +
                            sdp->sd_log_commited_buf / 503;
        if (sdp->sd_log_commited_revoke)
                reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                            sizeof(uint64_t));

        old = sdp->sd_log_blks_free;
        sdp->sd_log_blks_free += tr->tr_reserved -
                                 (reserved - sdp->sd_log_blks_reserved);

        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free >= old);
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}
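
/*
 * Reservation math in log_refund(): the initial 1 covers the log header;
 * for data buffers the term 1 + N + N/503 covers N journaled blocks plus
 * the descriptor blocks that list them (503 is presumably the number of
 * block pointers that fit in one descriptor at the default block size --
 * an assumption, not verified against the on-disk headers here).  For
 * example, committing 1006 buffers gives 1 + (1 + 1006 + 2) = 1010
 * reserved blocks before any revoke blocks are added.
 */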

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        lops_incore_commit(sdp, tr);

        sdp->sd_vfs->s_dirt = 1;
        unlock_from_trans(sdp);

        kfree(tr);

        gfs2_log_lock(sdp);
        if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
                gfs2_log_unlock(sdp);
                gfs2_log_flush(sdp);
        } else
                gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !atomic_read(&sdp->sd_log_trans_count));
        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free ==
                             sdp->sd_jdesc->jd_blocks);
        gfs2_assert_withdraw(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        if (sdp->sd_log_flush_wrapped)
                sdp->sd_log_wraps++;
        sdp->sd_log_tail = sdp->sd_log_head;

        mutex_unlock(&sdp->sd_log_flush_lock);
}