/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>

/*
 * IO end handler for temporary buffer_heads handling writes to the journal.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	struct buffer_head *orig_bh = bh->b_private;

	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	if (orig_bh) {
		clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
		smp_mb__after_atomic();
		wake_up_bit(&orig_bh->b_state, BH_Shadow);
	}
	unlock_buffer(bh);
}

/*
 * When an ext4 file is truncated, it is possible that some pages are not
 * successfully freed, because they are attached to a committing transaction.
 * After the transaction commits, these pages are left on the LRU, with no
 * ->mapping, and with attached buffers. These pages are trivially reclaimable
 * by the VM, but their apparent absence upsets the VM accounting, and it makes
 * the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list. Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock. The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (!trylock_page(page))
		goto nope;

	get_page(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	put_page(page);
	return;

nope:
	__brelse(bh);
}
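/*
 * Fill in the checksum field of the commit block.  When the journal
 * carries the csum v2/v3 feature, the commit block (with its checksum
 * field zeroed) is folded into j_csum_seed and the result stored back
 * in h_chksum[0]; otherwise this is a no-op.
 */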
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
	struct commit_header *h;
	__u32 csum;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	h = (struct commit_header *)(bh->b_data);
	h->h_chksum_type = 0;
	h->h_chksum_size = 0;
	h->h_chksum[0] = 0;
	csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
	h->h_chksum[0] = cpu_to_be32(csum);
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	struct timespec64 now = current_kernel_time64();

	*cbh = NULL;

	if (is_journal_aborted(journal))
		return 0;

	bh = jbd2_journal_get_descriptor_buffer(commit_transaction,
						JBD2_COMMIT_BLOCK);
	if (!bh)
		return 1;

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
	tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);

	if (jbd2_has_feature_checksum(journal)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}
	jbd2_commit_block_csum_set(journal, bh);

	BUFFER_TRACE(bh, "submit commit block");
	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_has_feature_async_commit(journal))
		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
	else
		ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);

	*cbh = bh;
	return ret;
}
/*
 * This function, along with journal_submit_commit_record(),
 * allows the commit record to be written asynchronously.
 */
static int journal_wait_on_commit_record(journal_t *journal,
					 struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);            /* One for getblk() */

	return ret;
}

/*
 * Write the filemap data using the writepage() address_space_operation.
 * We don't do block allocation here even for delalloc. We don't
 * use writepages() because with delayed allocation we may be doing
 * block allocation in writepages().
 */
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode =  WB_SYNC_ALL,
		.nr_to_write = mapping->nrpages * 2,
		.range_start = 0,
		.range_end = i_size_read(mapping->host),
	};

	ret = generic_writepages(mapping, &wbc);
	return ret;
}
/*
 * Submit all the data buffers of inodes associated with the transaction to
 * disk.
 *
 * We are in a committing transaction. Therefore no new inode can be added to
 * our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
 * currently operate on from being released while we write out pages.
 */
static int journal_submit_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode;
	int err, ret = 0;
	struct address_space *mapping;

	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WRITE_DATA))
			continue;
		mapping = jinode->i_vfs_inode->i_mapping;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		/*
		 * Submit the inode data buffers. We use writepage
		 * instead of writepages because writepages can do
		 * block allocation with delalloc, and we need to
		 * write only already-allocated blocks here.
		 */
		trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
		err = journal_submit_inode_data_buffers(mapping);
		if (!ret)
			ret = err;
		spin_lock(&journal->j_list_lock);
		J_ASSERT(jinode->i_transaction == commit_transaction);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}
	spin_unlock(&journal->j_list_lock);
	return ret;
}

/*
 * Wait for data submitted for writeout, refile inodes to proper
 * transaction if needed.
 */
static int journal_finish_inode_data_buffers(journal_t *journal,
		transaction_t *commit_transaction)
{
	struct jbd2_inode *jinode, *next_i;
	int err, ret = 0;

	/* For locking, see the comment in journal_submit_data_buffers() */
	spin_lock(&journal->j_list_lock);
	list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
		if (!(jinode->i_flags & JI_WAIT_DATA))
			continue;
		jinode->i_flags |= JI_COMMIT_RUNNING;
		spin_unlock(&journal->j_list_lock);
		err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
		if (err) {
			/*
			 * Because AS_EIO is cleared by
			 * filemap_fdatawait_range(), set it again so
			 * that the user process can get -EIO from fsync().
			 */
			mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);

			if (!ret)
				ret = err;
		}
		spin_lock(&journal->j_list_lock);
		jinode->i_flags &= ~JI_COMMIT_RUNNING;
		smp_mb();
		wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
	}

	/* Now refile inodes to proper lists */
	list_for_each_entry_safe(jinode, next_i,
				 &commit_transaction->t_inode_list, i_list) {
		list_del(&jinode->i_list);
		if (jinode->i_next_transaction) {
			jinode->i_transaction = jinode->i_next_transaction;
			jinode->i_next_transaction = NULL;
			list_add(&jinode->i_list,
				&jinode->i_transaction->t_inode_list);
		} else {
			jinode->i_transaction = NULL;
		}
	}
	spin_unlock(&journal->j_list_lock);

	return ret;
}
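/*
 * Fold the contents of one journal buffer into the transaction's running
 * crc32.  This running checksum ends up in the commit block when the
 * JBD2 "checksum" compat feature is enabled.
 */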
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr);

	return checksum;
}
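/*
 * Store the on-disk block number of a journalled buffer in its descriptor
 * tag.  With the 64bit feature the upper bits of the block number go into
 * t_blocknr_high; otherwise only the low 32 bits are recorded.
 */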
static void write_tag_block(journal_t *j, journal_block_tag_t *tag,
				   unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (jbd2_has_feature_64bit(j))
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
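/*
 * Compute the per-block checksum stored in the descriptor tag: the
 * transaction sequence number and the buffer contents are folded into
 * j_csum_seed.  csum3 journals keep the full 32-bit value in the tag3
 * layout, while older csum2 journals truncate it to 16 bits.
 */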
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
				    struct buffer_head *bh, __u32 sequence)
{
	journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag;
	struct page *page = bh->b_page;
	__u8 *addr;
	__u32 csum32;
	__be32 seq;

	if (!jbd2_journal_has_csum_v2or3(j))
		return;

	seq = cpu_to_be32(sequence);
	addr = kmap_atomic(page);
	csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
	csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
			     bh->b_size);
	kunmap_atomic(addr);

	if (jbd2_has_feature_csum3(j))
		tag3->t_checksum = cpu_to_be32(csum32);
	else
		tag->t_checksum = cpu_to_be16(csum32);
}
/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log. This
 * function is called by the journal thread to begin a complete commit.
 */
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh;
	struct buffer_head *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	ktime_t start_time;
	u64 commit_time;
	char *tagp = NULL;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;
	struct blk_plug plug;
	/* Tail of the journal */
	unsigned long first_block;
	tid_t first_tid;
	int update_tail;
	int csum_size = 0;
	LIST_HEAD(io_bufs);
	LIST_HEAD(log_bufs);

	if (jbd2_journal_has_csum_v2or3(journal))
		csum_size = sizeof(struct jbd2_journal_block_tail);

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		mutex_lock(&journal->j_checkpoint_mutex);
		/*
		 * We hold j_checkpoint_mutex so tail cannot change under us.
		 * We don't need any special data guarantees for writing sb
		 * since journal is empty and it is ok for write to be
		 * flushed only with transaction commit.
		 */
		jbd2_journal_update_sb_log_tail(journal,
						journal->j_tail_sequence,
						journal->j_tail,
						WRITE_SYNC);
		mutex_unlock(&journal->j_checkpoint_mutex);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;

	trace_jbd2_start_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_RUNNING);
	commit_transaction->t_state = T_LOCKED;

	trace_jbd2_commit_locking(journal, commit_transaction);
	stats.run.rs_wait = commit_transaction->t_max_wait;
	stats.run.rs_request_delay = 0;
	stats.run.rs_locked = jiffies;
	if (commit_transaction->t_requested)
		stats.run.rs_request_delay =
			jbd2_time_diff(commit_transaction->t_requested,
				       stats.run.rs_locked);
	stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
					      stats.run.rs_locked);

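	/*
	 * Wait until all handles attached to this transaction have
	 * completed.  New handles no longer join it once the state is
	 * T_LOCKED, so when t_updates reaches zero the transaction's
	 * buffer lists are stable.
	 */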
	spin_lock(&commit_transaction->t_handle_lock);
	while (atomic_read(&commit_transaction->t_updates)) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
					TASK_UNINTERRUPTIBLE);
		if (atomic_read(&commit_transaction->t_updates)) {
			spin_unlock(&commit_transaction->t_handle_lock);
			write_unlock(&journal->j_state_lock);
			schedule();
			write_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers. Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding. These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved. This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
		 * leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory.
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal, false);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD2: commit phase 1\n");

	/*
	 * Clear the revoked flag to reflect that there are no revoked buffers
	 * in the next transaction, which is about to be started.
	 */
	jbd2_clear_buffer_revoked_flags(journal);

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	/*
	 * Reserved credits cannot be claimed anymore, free them.
	 */
	atomic_sub(atomic_read(&journal->j_reserved_credits),
		   &commit_transaction->t_outstanding_credits);

	trace_jbd2_commit_flushing(journal, commit_transaction);
	stats.run.rs_flushing = jiffies;
	stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
					     stats.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	start_time = ktime_get();
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	write_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD2: commit phase 2a\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = journal_submit_data_buffers(journal, commit_transaction);
	if (err)
		jbd2_journal_abort(journal, err);

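	/*
	 * Plug the block layer so that the revoke records and the metadata
	 * writes queued below can be submitted as one batch.
	 */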
	blk_start_plug(&plug);
	jbd2_journal_write_revoke_records(commit_transaction, &log_bufs);

	jbd_debug(3, "JBD2: commit phase 2b\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	write_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_COMMIT;
	write_unlock(&journal->j_state_lock);

	trace_jbd2_commit_logging(journal, commit_transaction);
	stats.run.rs_logging = jiffies;
	stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
					       stats.run.rs_logging);
	stats.run.rs_blocks =
		atomic_read(&commit_transaction->t_outstanding_credits);
	stats.run.rs_blocks_logged = 0;

	J_ASSERT(commit_transaction->t_nr_buffers <=
		 atomic_read(&commit_transaction->t_outstanding_credits));

	err = 0;
	bufs = 0;
	descriptor = NULL;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it. */

		if (is_journal_aborted(journal)) {
			clear_buffer_jbddirty(jh2bh(jh));
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_buffer_abort_trigger(jh,
						  jh->b_frozen_data ?
						  jh->b_frozen_triggers :
						  jh->b_triggers);
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

		if (!descriptor) {
			J_ASSERT (bufs == 0);

			jbd_debug(4, "JBD2: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(
							commit_transaction,
							JBD2_DESCRIPTOR_BLOCK);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
				(unsigned long long)descriptor->b_blocknr,
				descriptor->b_data);
			tagp = &descriptor->b_data[sizeof(journal_header_t)];
			space_left = descriptor->b_size -
						sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(descriptor);
			set_buffer_dirty(descriptor);
			wbuf[bufs++] = descriptor;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(descriptor, "ph3: file as descriptor");
			jbd2_file_log_bh(&log_bufs, descriptor);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		atomic_dec(&commit_transaction->t_outstanding_credits);

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/*
		 * Make a temporary IO buffer with which to write it out
		 * (this will requeue the metadata buffer to BJ_Shadow).
		 */
		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &wbuf[bufs], blocknr);
		if (flags < 0) {
			jbd2_journal_abort(journal, flags);
			continue;
		}
		jbd2_file_log_bh(&io_bufs, wbuf[bufs]);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(journal, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be16(tag_flag);
		jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
					commit_transaction->t_tid);
		tagp += tag_bytes;
		space_left -= tag_bytes;
		bufs++;

		if (first_tag) {
			memcpy (tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16 + csum_size) {

			jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
start_journal_io:
			if (descriptor)
				jbd2_descriptor_block_csum_set(journal,
							descriptor);

			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (jbd2_has_feature_checksum(journal)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
			}
			cond_resched();

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	err = journal_finish_inode_data_buffers(journal, commit_transaction);
	if (err) {
		printk(KERN_WARNING
			"JBD2: Detected IO errors while flushing file data "
		       "on %s\n", journal->j_devname);
		if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
			jbd2_journal_abort(journal, err);
		err = 0;
	}

	/*
	 * Get the current oldest transaction in the log before we issue a
	 * flush to the filesystem device. After the flush we can be sure that
	 * blocks of all older transactions are checkpointed to persistent
	 * storage and we will be safe to update the journal start in the
	 * superblock with the numbers we get here.
	 */
	update_tail =
		jbd2_journal_get_log_tail(journal, &first_tid, &first_block);

	write_lock(&journal->j_state_lock);
	if (update_tail) {
		long freed = first_block - journal->j_tail;

		if (first_block < journal->j_tail)
			freed += journal->j_last - journal->j_first;
		/* Update tail only if we free a significant amount of space */
		if (freed < journal->j_maxlen / 4)
			update_tail = 0;
	}
	J_ASSERT(commit_transaction->t_state == T_COMMIT);
	commit_transaction->t_state = T_COMMIT_DFLUSH;
	write_unlock(&journal->j_state_lock);

	/*
	 * If the journal is not located on the file system device,
	 * then we must flush the file system device before we issue
	 * the commit record.
	 */
	if (commit_transaction->t_need_data_flush &&
	    (journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);

	/* Done it all: now write the commit record asynchronously. */
	if (jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						 &cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}

	blk_finish_plug(&plug);

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the io_bufs list.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	*/

	jbd_debug(3, "JBD2: commit phase 3\n");

	while (!list_empty(&io_bufs)) {
		struct buffer_head *bh = list_entry(io_bufs.prev,
						    struct buffer_head,
						    b_assoc_buffers);

		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;

		/*
		 * The list contains temporary buffer heads created by
		 * jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to refile the corresponding shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_buffer_jwrite(bh);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));
		J_ASSERT_BH(bh, !buffer_shadow(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT (commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD2: commit phase 4\n");

	/* Here we wait for the revoke record and descriptor record buffers */
	while (!list_empty(&log_bufs)) {
		struct buffer_head *bh;

		bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
		wait_on_buffer(bh);
		cond_resched();

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_unfile_log_bh(bh);
		stats.run.rs_blocks_logged++;
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	if (err)
		jbd2_journal_abort(journal, err);

	jbd_debug(3, "JBD2: commit phase 5\n");
	write_lock(&journal->j_state_lock);
	J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
	commit_transaction->t_state = T_COMMIT_JFLUSH;
	write_unlock(&journal->j_state_lock);

	if (!jbd2_has_feature_async_commit(journal)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						&cbh, crc32_sum);
		if (err)
			jbd2_journal_abort(journal, err);
	}
	if (cbh)
		err = journal_wait_on_commit_record(journal, cbh);
	stats.run.rs_blocks_logged++;
	if (jbd2_has_feature_async_commit(journal) &&
	    journal->j_flags & JBD2_BARRIER) {
		blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
	}

	if (err)
		jbd2_journal_abort(journal, err);

	/*
	 * Now the disk caches for the filesystem device are flushed, so it is
	 * safe to erase checkpointed transactions from the log by updating
	 * the journal superblock.
	 */
	if (update_tail)
		jbd2_update_log_tail(journal, first_tid, first_block);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list they
	   were on before. */

	jbd_debug(3, "JBD2: commit phase 6\n");

	J_ASSERT(list_empty(&commit_transaction->t_inode_list));
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;
		int try_to_free = 0;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		/*
		 * Get a reference so that bh cannot be freed before we are
		 * done with it.
		 */
		get_bh(bh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now. If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 *
		 * We also know that the frozen data has already fired
		 * its triggers if they exist, so we can clear that too.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
				jh->b_frozen_triggers = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
			jh->b_frozen_triggers = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty. If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/*
		 * A buffer which has been freed while still being journaled
		 * by a previous transaction is refiled to BJ_Forget of the
		 * running transaction. If the just-committed transaction
		 * contains an "add to orphan" operation, we can completely
		 * invalidate the buffer now. We are rather thorough about
		 * that, since the buffer may still be accessible when
		 * blocksize < pagesize and it is attached to the last
		 * partial page.
		 */
		if (buffer_freed(bh) && !jh->b_next_transaction) {
			struct address_space *mapping;

			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);

			/*
			 * Block device buffers need to stay mapped all the
			 * time, so it is enough to clear buffer_jbddirty and
			 * buffer_freed bits. For the file mapping buffers
			 * (i.e. journalled data) we need to unmap the buffer
			 * and clear more bits. We also need to be careful
			 * about the check because the data page mapping can
			 * get cleared under our hands. In that case we need
			 * not clear more bits, because the page and buffers
			 * will be freed and can never be reused once we are
			 * done with them.
			 */
			mapping = READ_ONCE(bh->b_page->mapping);
			if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
				clear_buffer_mapped(bh);
				clear_buffer_new(bh);
				clear_buffer_req(bh);
				bh->b_bdev = NULL;
			}
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			if (is_journal_aborted(journal))
				clear_buffer_jbddirty(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/*
			 * A buffer on the BJ_Forget list that is not jbddirty
			 * has been freed by this transaction and hence could
			 * not have been reallocated until this transaction
			 * has committed. *BUT* it could be reallocated once
			 * we have written all the data to disk and before we
			 * process the buffer on the BJ_Forget list.
			 */
			if (!jh->b_next_transaction)
				try_to_free = 1;
		}
		JBUFFER_TRACE(jh, "refile or unfile buffer");
		__jbd2_journal_refile_buffer(jh);
		jbd_unlock_bh_state(bh);
		if (try_to_free)
			release_buffer_page(bh);	/* Drops bh reference */
		else
			__brelse(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction(). Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		write_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Add the transaction to the checkpoint list.
	 * __journal_remove_checkpoint() cannot destroy the transaction
	 * under us because it is not marked as T_FINISHED yet. */
	if (journal->j_checkpoint_transactions == NULL) {
		journal->j_checkpoint_transactions = commit_transaction;
		commit_transaction->t_cpnext = commit_transaction;
		commit_transaction->t_cpprev = commit_transaction;
	} else {
		commit_transaction->t_cpnext =
			journal->j_checkpoint_transactions;
		commit_transaction->t_cpprev =
			commit_transaction->t_cpnext->t_cpprev;
		commit_transaction->t_cpnext->t_cpprev =
			commit_transaction;
		commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
	}
	spin_unlock(&journal->j_list_lock);

	/* Done with this transaction! */

	jbd_debug(3, "JBD2: commit phase 7\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);

	commit_transaction->t_start = jiffies;
	stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
					      commit_transaction->t_start);

	/*
	 * File the transaction statistics
	 */
	stats.ts_tid = commit_transaction->t_tid;
	stats.run.rs_handle_count =
		atomic_read(&commit_transaction->t_handle_count);
	trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
			     commit_transaction->t_tid, &stats.run);
	stats.ts_requested = (commit_transaction->t_requested) ? 1 : 0;

	commit_transaction->t_state = T_COMMIT_CALLBACK;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));

	/*
	 * Weight the commit time higher than the average time so we don't
	 * react too strongly to vast changes in the commit time.
	 */
	if (likely(journal->j_average_commit_time))
		journal->j_average_commit_time = (commit_time +
				journal->j_average_commit_time*3) / 4;
	else
		journal->j_average_commit_time = commit_time;

	write_unlock(&journal->j_state_lock);

	if (journal->j_commit_callback)
		journal->j_commit_callback(journal, commit_transaction);

	trace_jbd2_end_commit(journal, commit_transaction);
	jbd_debug(1, "JBD2: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	write_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	commit_transaction->t_state = T_FINISHED;
	/* Check if the transaction can be dropped now that we are finished */
	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
		jbd2_journal_free_transaction(commit_transaction);
	}
	spin_unlock(&journal->j_list_lock);
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_done_commit);

	/*
	 * Calculate overall stats
	 */
	spin_lock(&journal->j_history_lock);
	journal->j_stats.ts_tid++;
	journal->j_stats.ts_requested += stats.ts_requested;
	journal->j_stats.run.rs_wait += stats.run.rs_wait;
	journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
	journal->j_stats.run.rs_running += stats.run.rs_running;
	journal->j_stats.run.rs_locked += stats.run.rs_locked;
	journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
	journal->j_stats.run.rs_logging += stats.run.rs_logging;
	journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
	journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
	journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);
}