/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
                                KM_SLEEP|KM_NOFS);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                         niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}

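/*
 * Worked example of the calculation above (the struct sizes here are
 * illustrative assumptions, not ABI facts): if sizeof(struct xfs_log_vec)
 * were 48 bytes and sizeof(struct xfs_log_iovec) were 16 bytes, then for
 * niovecs = 3:
 *
 *      xlog_cil_iovec_space(3) = round_up(48 + 3 * 16, 8) = 96
 *
 * i.e. 96 bytes of header and iovec array precede the 64-bit aligned data
 * region of the buffer.
 */
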
/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *      a) it does not exist; or
 *      b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item_desc *lidp;

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned. We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {

                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kmem_zalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kmem_free(lip->li_lv_shadow);

                        lv = kmem_alloc(buf_size, KM_SLEEP|KM_NOFS);
                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_next = NULL;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}

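/*
 * For orientation, the shadow buffer built above has this layout (a sketch
 * derived from the assignments in the function, not to scale):
 *
 *      +---------------------------+ <- lv (single kmem_alloc'd region)
 *      | struct xfs_log_vec        |
 *      +---------------------------+ <- lv->lv_iovecp == &lv[1]
 *      | niovecs xfs_log_iovecs    |
 *      +---------------------------+ <- lv->lv_buf, 64-bit aligned
 *      | formatted region data     |
 *      +---------------------------+ <- lv + lv->lv_size
 *
 * Only the header and iovec portion is zeroed on allocation; the data area
 * is overwritten by ->iop_format() so zeroing it would be wasted work.
 */
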
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

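/*
 * To summarise the three accounting cases handled above:
 *
 *      old_lv == NULL: first commit into this context - pin the item and
 *              count the new lv in full.
 *      old_lv == lv:   item relogged into the same buffer - the caller has
 *              already subtracted the old size, so only the addition at
 *              the top of this function applies.
 *      old_lv != lv:   item relogged into the shadow buffer - subtract the
 *              old buffer's space and keep it as the new shadow.
 */
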
/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item_desc *lidp;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}

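/*
 * A compact summary of the per-item decision made in the loop above:
 *
 *      ordered item                    -> no formatting, just (re)insert
 *      shadow fits inside li_lv        -> reformat in place into li_lv
 *                                         (the common overwrite case)
 *      otherwise                       -> swap li_lv for the shadow; the
 *                                         old li_lv becomes the new shadow
 *                                         via xfs_cil_prepare_item()
 */
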
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item_desc *lidp;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        spin_lock(&cil->xc_cil_lock);
        list_for_each_entry(lidp, &tp->t_items, lid_trans) {
                struct xfs_log_item *lip = lidp->lid_item;

                /* Skip items which aren't dirty in this transaction. */
                if (!(lidp->lid_flags & XFS_LID_DIRTY))
                        continue;

                /*
                 * Only move the item if it isn't already at the tail. This is
                 * to prevent a transient list_empty() state when reinserting
                 * an item that is already the only item in the CIL.
                 */
                if (!list_is_last(&lip->li_cil, &cil->xc_cil))
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        /* account for space used by new iovec headers */
        len += diff_iovecs * sizeof(xlog_op_header_t);
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
                tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                int hdrs;

                hdrs = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
                ctx->ticket->t_unit_res += hdrs;
                ctx->ticket->t_curr_res += hdrs;
                tp->t_ticket->t_curr_res -= hdrs;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        spin_unlock(&cil->xc_cil_lock);
}

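/*
 * Worked example of the header accounting above, with illustrative numbers:
 * assume a 32KB iclog with a 512 byte header, so iclog_space = 32256 bytes.
 * If the checkpoint had already used 30000 bytes and this commit adds
 * len = 5000, then:
 *
 *      30000 / 32256 == 0  !=  35000 / 32256 == 1      (boundary crossed)
 *      hdrs = (5000 + 32256 - 1) / 32256 = 1
 *
 * so space for one extra iclog header plus one split-region op header is
 * stolen from the committing transaction's ticket.
 */
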
static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}

static void
xlog_discard_endio_work(
        struct work_struct      *work)
{
        struct xfs_cil_ctx *ctx =
                container_of(work, struct xfs_cil_ctx, discard_endio_work);
        struct xfs_mount *mp = ctx->cil->xc_log->l_mp;

        xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
        struct bio              *bio)
{
        struct xfs_cil_ctx *ctx = bio->bi_private;

        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
}

static void
xlog_discard_busy_extents(
        struct xfs_mount        *mp,
        struct xfs_cil_ctx      *ctx)
{
        struct list_head        *list = &ctx->busy_extents;
        struct xfs_extent_busy  *busyp;
        struct bio              *bio = NULL;
        struct blk_plug         plug;
        int                     error = 0;

        ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

        blk_start_plug(&plug);
        list_for_each_entry(busyp, list, list) {
                trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
                                         busyp->length);

                error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                XFS_FSB_TO_BB(mp, busyp->length),
                                GFP_NOFS, 0, &bio);
                if (error && error != -EOPNOTSUPP) {
                        xfs_info(mp,
         "discard failed for extent [0x%llx,%u], error %d",
                                 (unsigned long long)busyp->bno,
                                 busyp->length,
                                 error);
                        break;
                }
        }

        if (bio) {
                bio->bi_private = ctx;
                bio->bi_end_io = xlog_discard_endio;
                submit_bio(bio);
        } else {
                xlog_discard_endio_work(&ctx->discard_endio_work);
        }
        blk_finish_plug(&plug);
}

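/*
 * Note on the bio handling above: __blkdev_issue_discard() chains each
 * discard it builds onto *bio, so installing xlog_discard_endio() on the
 * final bio is sufficient - it runs once the whole chain has completed,
 * and only then is the context freed. If no bios were issued at all
 * (empty list or immediate failure), the endio work is called directly so
 * the context is still freed exactly once.
 */
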
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        void    *args,
        int     abort)
{
        struct xfs_cil_ctx      *ctx = args;
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                             (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        /*
         * If we are aborting the commit, wake up anyone waiting on the
         * committing list. If we don't, then on shutdown we can leave
         * processes waiting in xlog_cil_force_lsn() on a sequence commit
         * that will never happen because we aborted it.
         */
        spin_lock(&ctx->cil->xc_push_lock);
        if (abort)
                wake_up_all(&ctx->cil->xc_commit_wait);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents))
                xlog_discard_busy_extents(mp, ctx);
        else
                kmem_free(ctx);
}

/*
 * Push the Committed Item List to the log. If @push_seq is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
        struct xlog             *log)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;
        xfs_lsn_t               push_seq;

        if (!cil)
                return 0;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *      the current sequence not being found on the committing list;
         *      an empty CIL; and
         *      an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the waiter makes to
         * wait on the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        /*
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }

        /*
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;

        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_lsn requires us to mirror the new sequence into the cil
         * structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        cil->xc_current_sequence = new_ctx->sequence;
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
        if (error)
                goto out_abort_free_ticket;

        /*
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
         */
restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log)) {
                        spin_unlock(&cil->xc_push_lock);
                        goto out_abort_free_ticket;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (new_ctx->sequence >= ctx->sequence)
                        continue;
                if (!new_ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);

        /* xfs_log_done always frees the ticket on error. */
        commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false);
        if (commit_lsn == -1)
                goto out_abort;

        /* attach all the transactions w/ busy extents to iclog */
        ctx->log_cb.cb_func = xlog_cil_committed;
        ctx->log_cb.cb_arg = ctx;
        error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
        if (error)
                goto out_abort;

        /*
         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return 0;

out_abort_free_ticket:
        xfs_log_ticket_put(tic);
out_abort:
        xlog_cil_committed(ctx, XFS_LI_ABORTED);
        return -EIO;
}

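/*
 * For orientation, the push above proceeds in five broad steps:
 *
 *      1. allocate the next context and lock out transaction commits
 *      2. detach the log vector chain from the CIL and switch contexts
 *      3. write the checkpoint (header + lv chain) via xlog_write()
 *      4. wait for all lower-sequence checkpoints to write their commit
 *         records
 *      5. write our commit record and attach xlog_cil_committed() to the
 *         commit iclog
 */
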
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil *cil = container_of(work, struct xfs_cil,
                                                        xc_push_work);
        xlog_cil_push(cil->xc_log);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
                return;

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }
        spin_unlock(&cil->xc_push_lock);
}

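/*
 * XLOG_CIL_SPACE_LIMIT() is a fixed fraction of the log size defined in
 * xfs_log_priv.h (an eighth of the log at the time of writing, though the
 * exact ratio is an implementation detail). As a rough illustration, a
 * 128MB log would kick off a background push once about 16MB of checkpoint
 * space has been consumed.
 */
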
/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}

/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
void
xfs_log_commit_cil(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_lsn_t               *commit_lsn,
        bool                    regrant)
{
        struct xlog             *log = mp->m_log;
        struct xfs_cil          *cil = log->l_cilp;

        /*
         * Do all necessary memory allocation before we lock the CIL.
         * This ensures the allocation does not deadlock with a CIL
         * push in memory reclaim (e.g. from kswapd).
         */
        xlog_cil_alloc_shadow_bufs(log, tp);

        /* lock out background commit */
        down_read(&cil->xc_ctx_lock);

        xlog_cil_insert_items(log, tp);

        /* check we didn't blow the reservation */
        if (tp->t_ticket->t_curr_res < 0)
                xlog_print_tic_res(mp, tp->t_ticket);

        tp->t_commit_lsn = cil->xc_ctx->sequence;
        if (commit_lsn)
                *commit_lsn = tp->t_commit_lsn;

        xfs_log_done(mp, tp->t_ticket, NULL, regrant);
        xfs_trans_unreserve_and_mod_sb(tp);

        /*
         * Once all the items of the transaction have been copied to the CIL,
         * the items can be unlocked and freed.
         *
         * This needs to be done before we drop the CIL context lock because we
         * have to update state in the log items and unlock them before they go
         * to disk. If we don't, then the CIL checkpoint can race with us and
         * we can run checkpoint completion before we've updated and unlocked
         * the log items. This affects (at least) processing of stale buffers,
         * inodes and EFIs.
         */
        xfs_trans_free_items(tp, tp->t_commit_lsn, false);

        xlog_cil_push_background(log);

        up_read(&cil->xc_ctx_lock);
}

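/*
 * Usage sketch (illustrative only, not a real call site): this function is
 * reached through the generic transaction commit path rather than called
 * directly, e.g.:
 *
 *      xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *      xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *      error = xfs_trans_commit(tp);   <- ends up in xfs_log_commit_cil()
 *
 * so every transaction commit funnels its dirty items into the CIL instead
 * of writing them directly into the log.
 */
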
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
        struct xlog     *log,
        xfs_lsn_t       sequence)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx;
        xfs_lsn_t               commit_lsn = NULLCOMMITLSN;

        ASSERT(sequence <= cil->xc_current_sequence);

        /*
         * check to see if we need to force out the current context.
         * xlog_cil_push() handles racing pushes for the same sequence,
         * so no need to deal with it here.
         */
restart:
        xlog_cil_push_now(log, sequence);

        /*
         * See if we can find a previous sequence still committing.
         * We need to wait for all previous sequence commits to complete
         * before allowing the force of push_seq to go ahead. Hence block
         * on commits for those as well.
         */
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log))
                        goto out_shutdown;
                if (ctx->sequence > sequence)
                        continue;
                if (!ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
                if (ctx->sequence != sequence)
                        continue;
                /* found it! */
                commit_lsn = ctx->commit_lsn;
        }

        /*
         * The call to xlog_cil_push_now() executes the push in the background.
         * Hence by the time we have got here our sequence may not have been
         * pushed yet. This is true if the current sequence still matches the
         * push sequence after the above wait loop and the CIL still contains
         * dirty objects. This is guaranteed by the push code first adding the
         * context to the committing list before emptying the CIL.
         *
         * Hence if we don't find the context in the committing list and the
         * current sequence number is unchanged then the CIL contents are
         * significant. If the CIL is empty, it means there was nothing to push
         * and that means there is nothing to wait for. If the CIL is not
         * empty, it means we haven't yet started the push, because if it had
         * started we would have found the context on the committing list.
         */
        if (sequence == cil->xc_current_sequence &&
            !list_empty(&cil->xc_cil)) {
                spin_unlock(&cil->xc_push_lock);
                goto restart;
        }

        spin_unlock(&cil->xc_push_lock);
        return commit_lsn;

        /*
         * We detected a shutdown in progress. We need to trigger the log force
         * to pass through its iclog state machine error handling, even though
         * we are already in a shutdown state. Hence we can't return
         * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
         * LSN is already stable), so we return a zero LSN instead.
         */
out_shutdown:
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item *lip)
{
        struct xfs_cil_ctx *ctx;

        if (list_empty(&lip->li_cil))
                return false;

        ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
                return false;
        return true;
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
        struct xlog     *log)
{
        struct xfs_cil  *cil;
        struct xfs_cil_ctx *ctx;

        cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
        if (!cil)
                return -ENOMEM;

        ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
        if (!ctx) {
                kmem_free(cil);
                return -ENOMEM;
        }

        INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
        INIT_LIST_HEAD(&cil->xc_cil);
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);

        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
        cil->xc_current_sequence = ctx->sequence;

        cil->xc_log = log;
        log->l_cilp = cil;
        return 0;
}

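/*
 * A minimal sketch of the CIL lifecycle, assuming the callers are the log
 * setup/teardown paths in xfs_log.c:
 *
 *      error = xlog_cil_init(log);             <- at log allocation
 *      ...
 *      xlog_cil_init_post_recovery(log);       <- once recovery is done
 *      ...
 *      xlog_cil_destroy(log);                  <- at log teardown
 */
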
void
xlog_cil_destroy(
        struct xlog     *log)
{
        if (log->l_cilp->xc_ctx) {
                if (log->l_cilp->xc_ctx->ticket)
                        xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
                kmem_free(log->l_cilp->xc_ctx);
        }

        ASSERT(list_empty(&log->l_cilp->xc_cil));
        kmem_free(log->l_cilp);
}