Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved. |
| 3 | * |
| 4 | * This program is free software; you can redistribute it and/or |
| 5 | * modify it under the terms of the GNU General Public License as |
| 6 | * published by the Free Software Foundation. |
| 7 | * |
| 8 | * This program is distributed in the hope that it would be useful, |
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 11 | * GNU General Public License for more details. |
| 12 | * |
| 13 | * You should have received a copy of the GNU General Public License |
| 14 | * along with this program; if not, write to the Free Software Foundation, |
| 15 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
| 16 | */ |
| 17 | |
| 18 | #include "xfs.h" |
| 19 | #include "xfs_fs.h" |
Christoph Hellwig | 4fb6e8a | 2014-11-28 14:25:04 +1100 | [diff] [blame] | 20 | #include "xfs_format.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 21 | #include "xfs_log_format.h" |
Dave Chinner | 70a9883 | 2013-10-23 10:36:05 +1100 | [diff] [blame] | 22 | #include "xfs_shared.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 23 | #include "xfs_trans_resv.h" |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 24 | #include "xfs_mount.h" |
| 25 | #include "xfs_error.h" |
| 26 | #include "xfs_alloc.h" |
Dave Chinner | efc27b5 | 2012-04-29 10:39:43 +0000 | [diff] [blame] | 27 | #include "xfs_extent_busy.h" |
Christoph Hellwig | e84661a | 2011-05-20 13:45:32 +0000 | [diff] [blame] | 28 | #include "xfs_discard.h" |
Dave Chinner | 239880e | 2013-10-23 10:50:10 +1100 | [diff] [blame] | 29 | #include "xfs_trans.h" |
| 30 | #include "xfs_trans_priv.h" |
| 31 | #include "xfs_log.h" |
| 32 | #include "xfs_log_priv.h" |
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 33 | #include "xfs_trace.h" |
| 34 | |
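| | /* Workqueue used to defer busy extent discard completions out of bio end_io context. */ |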
| 35 | struct workqueue_struct *xfs_discard_wq; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 36 | |
| 37 | /* |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 38 | * Allocate a new ticket. Failing to get a new ticket makes it really hard to |
| 39 | * recover, so we don't allow failure here. Also, we allocate in a context that |
| 40 | * we don't want to be issuing transactions from, so we need to tell the |
| 41 | * allocation code this as well. |
| 42 | * |
| 43 | * We don't reserve any space for the ticket - we are going to steal whatever |
| 44 | * space we require from transactions as they commit. To ensure we reserve all |
| 45 | * the space required, we need to set the current reservation of the ticket to |
| 46 | * zero so that we know to steal the initial transaction overhead from the |
| 47 | * first transaction commit. |
| 48 | */ |
| 49 | static struct xlog_ticket * |
| 50 | xlog_cil_ticket_alloc( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 51 | struct xlog *log) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 52 | { |
| 53 | struct xlog_ticket *tic; |
| 54 | |
| 55 | tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, |
| 56 | KM_SLEEP|KM_NOFS); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 57 | |
| 58 | /* |
| 59 | * set the current reservation to zero so we know to steal the basic |
| 60 | * transaction overhead reservation from the first transaction commit. |
| 61 | */ |
| 62 | tic->t_curr_res = 0; |
| 63 | return tic; |
| 64 | } |
| 65 | |
| 66 | /* |
| 67 | * After the first stage of log recovery is done, we know where the head and |
| 68 | * tail of the log are. We need this log initialisation done before we can |
| 69 | * initialise the first CIL checkpoint context. |
| 70 | * |
| 71 | * Here we allocate a log ticket to track space usage during a CIL push. This |
| 72 | * ticket is passed to xlog_write() directly so that we don't slowly leak log |
| 73 | * space by failing to account for space used by log headers and additional |
| 74 | * region headers for split regions. |
| 75 | */ |
| 76 | void |
| 77 | xlog_cil_init_post_recovery( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 78 | struct xlog *log) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 79 | { |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 80 | log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); |
| 81 | log->l_cilp->xc_ctx->sequence = 1; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 82 | } |
| 83 | |
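| | /* |
| | * Space needed at the head of a shadow log vector buffer for the |
| | * struct xfs_log_vec header and its array of niovecs struct xfs_log_iovec |
| | * entries, rounded up to a 64-bit boundary so that the data region that |
| | * follows it starts naturally aligned. |
| | */ |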
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 84 | static inline int |
| 85 | xlog_cil_iovec_space( |
| 86 | uint niovecs) |
| 87 | { |
| 88 | return round_up((sizeof(struct xfs_log_vec) + |
| 89 | niovecs * sizeof(struct xfs_log_iovec)), |
| 90 | sizeof(uint64_t)); |
| 91 | } |
| 92 | |
| 93 | /* |
| 94 | * Allocate or pin log vector buffers for CIL insertion. |
| 95 | * |
| 96 | * The CIL currently uses disposable buffers for copying a snapshot of the |
| 97 | * modified items into the log during a push. The biggest problem with this is |
| 98 | * the requirement to allocate the disposable buffer during the commit if: |
| 99 | * a) it does not exist; or |
| 100 | * b) it is too small |
| 101 | * |
| 102 | * If we do this allocation within xlog_cil_insert_format_items(), it is done |
| 103 | * under the xc_ctx_lock, which means that a CIL push cannot occur during |
| 104 | * the memory allocation. This means that we have a potential deadlock situation |
| 105 | * under low memory conditions when we have lots of dirty metadata pinned in |
| 106 | * the CIL and we need a CIL commit to occur to free memory. |
| 107 | * |
| 108 | * To avoid this, we need to move the memory allocation outside the |
| 109 | * xc_ctx_lock, but because the log vector buffers are disposable, that opens |
| 110 | * up a TOCTOU race condition w.r.t. the CIL committing and removing the log |
| 111 | * vector buffers between the check and the formatting of the item into the |
| 112 | * log vector buffer within the xc_ctx_lock. |
| 113 | * |
| 114 | * Because the log vector buffer needs to be unchanged during the CIL push |
| 115 | * process, we cannot share the buffer between the transaction commit (which |
| 116 | * modifies the buffer) and the CIL push context that is writing the changes |
| 117 | * into the log. This means skipping preallocation of buffer space is |
| 118 | * unreliable, but we most definitely do not want to be allocating and freeing |
| 119 | * buffers unnecessarily during commits when overwrites can be done safely. |
| 120 | * |
| 121 | * The simplest solution to this problem is to allocate a shadow buffer when a |
| 122 | * log item is committed for the second time, and then to only use this buffer |
| 123 | * if necessary. The buffer can remain attached to the log item until such time |
| 124 | * as it is needed, and this is the buffer that is reallocated to match the size of |
| 125 | * the incoming modification. Then during the formatting of the item we can swap |
| 126 | * the active buffer with the new one if we can't reuse the existing buffer. We |
| 127 | * don't free the old buffer as it may be reused on the next modification if |
| 128 | * its size is right, otherwise we'll free and reallocate it at that point. |
| 129 | * |
| 130 | * This function builds a vector for the changes in each log item in the |
| 131 | * transaction. It then works out the length of the buffer needed for each log |
| 132 | * item, allocates them and attaches the vector to the log item in preparation |
| 133 | * for the formatting step which occurs under the xc_ctx_lock. |
| 134 | * |
| 135 | * While this means the memory footprint goes up, it avoids the repeated |
| 136 | * alloc/free pattern that repeated modifications of an item would otherwise |
| 137 | * cause, and hence minimises the CPU overhead of such behaviour. |
| 138 | */ |
| 139 | static void |
| 140 | xlog_cil_alloc_shadow_bufs( |
| 141 | struct xlog *log, |
| 142 | struct xfs_trans *tp) |
| 143 | { |
| 144 | struct xfs_log_item_desc *lidp; |
| 145 | |
| 146 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
| 147 | struct xfs_log_item *lip = lidp->lid_item; |
| 148 | struct xfs_log_vec *lv; |
| 149 | int niovecs = 0; |
| 150 | int nbytes = 0; |
| 151 | int buf_size; |
| 152 | bool ordered = false; |
| 153 | |
| 154 | /* Skip items which aren't dirty in this transaction. */ |
| 155 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
| 156 | continue; |
| 157 | |
| 158 | /* get number of vecs and size of data to be stored */ |
| 159 | lip->li_ops->iop_size(lip, &niovecs, &nbytes); |
| 160 | |
| 161 | /* |
| 162 | * Ordered items need to be tracked but we do not wish to write |
| 163 | * them. We need a logvec to track the object, but we do not |
| 164 | * need an iovec or buffer to be allocated for copying data. |
| 165 | */ |
| 166 | if (niovecs == XFS_LOG_VEC_ORDERED) { |
| 167 | ordered = true; |
| 168 | niovecs = 0; |
| 169 | nbytes = 0; |
| 170 | } |
| 171 | |
| 172 | /* |
| 173 | * We 64-bit align the length of each iovec so that the start |
| 174 | * of the next one is naturally aligned. We'll need to |
| 175 | * account for that slack space here. Then round nbytes up |
| 176 | * to 64-bit alignment so that the initial buffer alignment is |
| 177 | * easy to calculate and verify. |
| 178 | */ |
| 179 | nbytes += niovecs * sizeof(uint64_t); |
| 180 | nbytes = round_up(nbytes, sizeof(uint64_t)); |
| 181 | |
| 182 | /* |
| 183 | * The data buffer needs to start 64-bit aligned, so round up |
| 184 | * that space to ensure we can align it appropriately and not |
| 185 | * overrun the buffer. |
| 186 | */ |
| 187 | buf_size = nbytes + xlog_cil_iovec_space(niovecs); |
| 188 | |
| 189 | /* |
| 190 | * if we have no shadow buffer, or it is too small, we need to |
| 191 | * reallocate it. |
| 192 | */ |
| 193 | if (!lip->li_lv_shadow || |
| 194 | buf_size > lip->li_lv_shadow->lv_size) { |
| 195 | |
| 196 | /* |
| 197 | * We free and allocate here as a realloc would copy |
| 198 | * unnecessary data. We don't use kmem_zalloc() for the |
| 199 | * same reason - we don't need to zero the data area in |
| 200 | * the buffer, only the log vector header and the iovec |
| 201 | * storage. |
| 202 | */ |
| 203 | kmem_free(lip->li_lv_shadow); |
| 204 | |
| 205 | lv = kmem_alloc(buf_size, KM_SLEEP|KM_NOFS); |
| 206 | memset(lv, 0, xlog_cil_iovec_space(niovecs)); |
| 207 | |
| 208 | lv->lv_item = lip; |
| 209 | lv->lv_size = buf_size; |
| 210 | if (ordered) |
| 211 | lv->lv_buf_len = XFS_LOG_VEC_ORDERED; |
| 212 | else |
| 213 | lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1]; |
| 214 | lip->li_lv_shadow = lv; |
| 215 | } else { |
| 216 | /* same or smaller, optimise common overwrite case */ |
| 217 | lv = lip->li_lv_shadow; |
| 218 | if (ordered) |
| 219 | lv->lv_buf_len = XFS_LOG_VEC_ORDERED; |
| 220 | else |
| 221 | lv->lv_buf_len = 0; |
| 222 | lv->lv_bytes = 0; |
| 223 | lv->lv_next = NULL; |
| 224 | } |
| 225 | |
| 226 | /* Ensure the lv is set up according to ->iop_size */ |
| 227 | lv->lv_niovecs = niovecs; |
| 228 | |
| 229 | /* The allocated data region lies beyond the iovec region */ |
| 230 | lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs); |
| 231 | } |
| 232 | |
| 233 | } |
| 234 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 235 | /* |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 236 | * Prepare the log item for insertion into the CIL. Calculate the difference in |
| 237 | * log space and vectors it will consume, and if it is a new item pin it as |
| 238 | * well. |
| 239 | */ |
| 240 | STATIC void |
| 241 | xfs_cil_prepare_item( |
| 242 | struct xlog *log, |
| 243 | struct xfs_log_vec *lv, |
| 244 | struct xfs_log_vec *old_lv, |
| 245 | int *diff_len, |
| 246 | int *diff_iovecs) |
| 247 | { |
| 248 | /* Account for the new LV being passed in */ |
| 249 | if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) { |
Dave Chinner | 110dc24 | 2014-05-20 08:18:09 +1000 | [diff] [blame] | 250 | *diff_len += lv->lv_bytes; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 251 | *diff_iovecs += lv->lv_niovecs; |
| 252 | } |
| 253 | |
| 254 | /* |
| 255 | * If there is no old LV, this is the first time we've seen the item in |
| 256 | * this CIL context and so we need to pin it. If we are replacing the |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 257 | * old_lv, then remove the space it accounts for and make it the shadow |
| 258 | * buffer for later freeing. In both cases we are now switching to the |
| 259 | * shadow buffer, so update the pointer to it appropriately. |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 260 | */ |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 261 | if (!old_lv) { |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 262 | lv->lv_item->li_ops->iop_pin(lv->lv_item); |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 263 | lv->lv_item->li_lv_shadow = NULL; |
| 264 | } else if (old_lv != lv) { |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 265 | ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED); |
| 266 | |
Dave Chinner | 110dc24 | 2014-05-20 08:18:09 +1000 | [diff] [blame] | 267 | *diff_len -= old_lv->lv_bytes; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 268 | *diff_iovecs -= old_lv->lv_niovecs; |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 269 | lv->lv_item->li_lv_shadow = old_lv; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 270 | } |
| 271 | |
| 272 | /* attach new log vector to log item */ |
| 273 | lv->lv_item->li_lv = lv; |
| 274 | |
| 275 | /* |
| 276 | * If this is the first time the item is being committed to the |
| 277 | * CIL, store the sequence number on the log item so we can |
| 278 | * tell in future commits whether this is the first checkpoint |
| 279 | * the item is being committed into. |
| 280 | */ |
| 281 | if (!lv->lv_item->li_seq) |
| 282 | lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence; |
| 283 | } |
| 284 | |
| 285 | /* |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 286 | * Format log items into flat buffers |
| 287 | * |
| 288 | * For delayed logging, we need to hold a formatted buffer containing all the |
| 289 | * changes on the log item. This enables us to relog the item in memory and |
| 290 | * write it out asynchronously without needing to relock the object that was |
| 291 | * modified at the time it gets written into the iclog. |
| 292 | * |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 293 | * This function takes the prepared log vectors attached to each log item, and |
| 294 | * formats the changes into the log vector buffer. The buffer it uses is |
| 295 | * dependent on the current state of the vector in the CIL - the shadow lv is |
| 296 | * guaranteed to be large enough for the current modification, but we will only |
| 297 | * use that if we can't reuse the existing lv. If we can't reuse the existing |
| 298 | * lv, then simply swap it out for the shadow lv. We don't free it - that is |
| 299 | * done lazily either by the next modification or the freeing of the log item. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 300 | * |
| 301 | * We don't set up region headers during this process; we simply copy the |
| 302 | * regions into the flat buffer. We can do this because we still have to do a |
| 303 | * formatting step to write the regions into the iclog buffer. Writing the |
| 304 | * ophdrs during the iclog write means that we can support splitting large |
| 305 | * regions across iclog boundaries without needing a change in the format of the |
| 306 | * item/region encapsulation. |
| 307 | * |
| 308 | * Hence what we need to do now is rewrite the vector array to point |
| 309 | * to the copied region inside the buffer we just allocated. This allows us to |
| 310 | * format the regions into the iclog as though they are being formatted |
| 311 | * directly out of the objects themselves. |
| 312 | */ |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 313 | static void |
| 314 | xlog_cil_insert_format_items( |
| 315 | struct xlog *log, |
| 316 | struct xfs_trans *tp, |
| 317 | int *diff_len, |
| 318 | int *diff_iovecs) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 319 | { |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 320 | struct xfs_log_item_desc *lidp; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 321 | |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 322 | |
| 323 | /* Bail out if we didn't find a log item. */ |
| 324 | if (list_empty(&tp->t_items)) { |
| 325 | ASSERT(0); |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 326 | return; |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 327 | } |
| 328 | |
| 329 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
Dave Chinner | 166d136 | 2013-08-12 20:50:04 +1000 | [diff] [blame] | 330 | struct xfs_log_item *lip = lidp->lid_item; |
Dave Chinner | 7492c5b | 2013-08-12 20:50:05 +1000 | [diff] [blame] | 331 | struct xfs_log_vec *lv; |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 332 | struct xfs_log_vec *old_lv = NULL; |
| 333 | struct xfs_log_vec *shadow; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 334 | bool ordered = false; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 335 | |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 336 | /* Skip items which aren't dirty in this transaction. */ |
| 337 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
| 338 | continue; |
| 339 | |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 340 | /* |
| 341 | * The formatting size information is already attached to |
| 342 | * the shadow lv on the log item. |
| 343 | */ |
| 344 | shadow = lip->li_lv_shadow; |
| 345 | if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED) |
| 346 | ordered = true; |
Dave Chinner | 166d136 | 2013-08-12 20:50:04 +1000 | [diff] [blame] | 347 | |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 348 | /* Skip items that do not have any vectors for writing */ |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 349 | if (!shadow->lv_niovecs && !ordered) |
Christoph Hellwig | 0244b96 | 2011-12-06 21:58:08 +0000 | [diff] [blame] | 350 | continue; |
| 351 | |
Dave Chinner | f5baac3 | 2013-08-12 20:50:06 +1000 | [diff] [blame] | 352 | /* compare to existing item size */ |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 353 | old_lv = lip->li_lv; |
| 354 | if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) { |
Dave Chinner | f5baac3 | 2013-08-12 20:50:06 +1000 | [diff] [blame] | 355 | /* same or smaller, optimise common overwrite case */ |
| 356 | lv = lip->li_lv; |
| 357 | lv->lv_next = NULL; |
| 358 | |
| 359 | if (ordered) |
| 360 | goto insert; |
| 361 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 362 | /* |
| 363 | * set the item up as though it is a new insertion so |
| 364 | * that the space reservation accounting is correct. |
| 365 | */ |
| 366 | *diff_iovecs -= lv->lv_niovecs; |
Dave Chinner | 110dc24 | 2014-05-20 08:18:09 +1000 | [diff] [blame] | 367 | *diff_len -= lv->lv_bytes; |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 368 | |
| 369 | /* Ensure the lv is set up according to ->iop_size */ |
| 370 | lv->lv_niovecs = shadow->lv_niovecs; |
| 371 | |
| 372 | /* reset the lv buffer information for new formatting */ |
| 373 | lv->lv_buf_len = 0; |
| 374 | lv->lv_bytes = 0; |
| 375 | lv->lv_buf = (char *)lv + |
| 376 | xlog_cil_iovec_space(lv->lv_niovecs); |
Christoph Hellwig | 9597df6 | 2013-12-13 11:00:42 +1100 | [diff] [blame] | 377 | } else { |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 378 | /* switch to shadow buffer! */ |
| 379 | lv = shadow; |
Christoph Hellwig | 9597df6 | 2013-12-13 11:00:42 +1100 | [diff] [blame] | 380 | lv->lv_item = lip; |
Christoph Hellwig | 9597df6 | 2013-12-13 11:00:42 +1100 | [diff] [blame] | 381 | if (ordered) { |
| 382 | /* track as an ordered logvec */ |
| 383 | ASSERT(lip->li_lv == NULL); |
Christoph Hellwig | 9597df6 | 2013-12-13 11:00:42 +1100 | [diff] [blame] | 384 | goto insert; |
| 385 | } |
Dave Chinner | f5baac3 | 2013-08-12 20:50:06 +1000 | [diff] [blame] | 386 | } |
| 387 | |
Dave Chinner | 3895e51 | 2014-02-10 10:37:18 +1100 | [diff] [blame] | 388 | ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t))); |
Christoph Hellwig | bde7cff | 2013-12-13 11:34:02 +1100 | [diff] [blame] | 389 | lip->li_ops->iop_format(lip, lv); |
Dave Chinner | 7492c5b | 2013-08-12 20:50:05 +1000 | [diff] [blame] | 390 | insert: |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 391 | xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 392 | } |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 393 | } |
| 394 | |
| 395 | /* |
| 396 | * Insert the log items into the CIL and calculate the difference in space |
| 397 | * consumed by the item. Add the space to the checkpoint ticket and calculate |
| 398 | * if the change requires additional log metadata. If it does, take that space |
Justin P. Mattock | 42b2aa8 | 2011-11-28 20:31:00 -0800 | [diff] [blame] | 399 | * as well. Remove the amount of space we added to the checkpoint ticket from |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 400 | * the current transaction ticket so that the accounting works out correctly. |
| 401 | */ |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 402 | static void |
Dave Chinner | 3b93c7a | 2010-08-24 11:45:53 +1000 | [diff] [blame] | 403 | xlog_cil_insert_items( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 404 | struct xlog *log, |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 405 | struct xfs_trans *tp) |
Dave Chinner | 3b93c7a | 2010-08-24 11:45:53 +1000 | [diff] [blame] | 406 | { |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 407 | struct xfs_cil *cil = log->l_cilp; |
| 408 | struct xfs_cil_ctx *ctx = cil->xc_ctx; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 409 | struct xfs_log_item_desc *lidp; |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 410 | int len = 0; |
| 411 | int diff_iovecs = 0; |
| 412 | int iclog_space; |
Dave Chinner | 3b93c7a | 2010-08-24 11:45:53 +1000 | [diff] [blame] | 413 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 414 | ASSERT(tp); |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 415 | |
| 416 | /* |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 417 | * We can do this safely because the context can't checkpoint until we |
| 418 | * are done so it doesn't matter exactly how we update the CIL. |
| 419 | */ |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 420 | xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs); |
| 421 | |
| 422 | /* |
| 423 | * Now (re-)position everything modified at the tail of the CIL. |
| 424 | * We do this here so we only need to take the CIL lock once during |
| 425 | * the transaction commit. |
| 426 | */ |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 427 | spin_lock(&cil->xc_cil_lock); |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 428 | list_for_each_entry(lidp, &tp->t_items, lid_trans) { |
| 429 | struct xfs_log_item *lip = lidp->lid_item; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 430 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 431 | /* Skip items which aren't dirty in this transaction. */ |
| 432 | if (!(lidp->lid_flags & XFS_LID_DIRTY)) |
| 433 | continue; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 434 | |
Brian Foster | 4703da7 | 2015-07-29 11:51:01 +1000 | [diff] [blame] | 435 | /* |
| 436 | * Only move the item if it isn't already at the tail. This is |
| 437 | * to prevent a transient list_empty() state when reinserting |
| 438 | * an item that is already the only item in the CIL. |
| 439 | */ |
| 440 | if (!list_is_last(&lip->li_cil, &cil->xc_cil)) |
| 441 | list_move_tail(&lip->li_cil, &cil->xc_cil); |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 442 | } |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 443 | |
| 444 | /* account for space used by new iovec headers */ |
| 445 | len += diff_iovecs * sizeof(xlog_op_header_t); |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 446 | ctx->nvecs += diff_iovecs; |
| 447 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 448 | /* attach the transaction to the CIL if it has any busy extents */ |
| 449 | if (!list_empty(&tp->t_busy)) |
| 450 | list_splice_init(&tp->t_busy, &ctx->busy_extents); |
| 451 | |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 452 | /* |
| 453 | * Now transfer enough transaction reservation to the context ticket |
| 454 | * for the checkpoint. The context ticket is special - the unit |
| 455 | * reservation has to grow as well as the current reservation as we |
| 456 | * steal from tickets so we can correctly determine the space used |
| 457 | * during the transaction commit. |
| 458 | */ |
| 459 | if (ctx->ticket->t_curr_res == 0) { |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 460 | ctx->ticket->t_curr_res = ctx->ticket->t_unit_res; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 461 | tp->t_ticket->t_curr_res -= ctx->ticket->t_unit_res; |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 462 | } |
| 463 | |
| 464 | /* do we need space for more log record headers? */ |
| 465 | iclog_space = log->l_iclog_size - log->l_iclog_hsize; |
| 466 | if (len > 0 && (ctx->space_used / iclog_space != |
| 467 | (ctx->space_used + len) / iclog_space)) { |
| 468 | int hdrs; |
| 469 | |
| 470 | hdrs = (len + iclog_space - 1) / iclog_space; |
| 471 | /* need to take into account split region headers, too */ |
| 472 | hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header); |
| 473 | ctx->ticket->t_unit_res += hdrs; |
| 474 | ctx->ticket->t_curr_res += hdrs; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 475 | tp->t_ticket->t_curr_res -= hdrs; |
| 476 | ASSERT(tp->t_ticket->t_curr_res >= len); |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 477 | } |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 478 | tp->t_ticket->t_curr_res -= len; |
Dave Chinner | d1583a3 | 2010-09-24 18:14:13 +1000 | [diff] [blame] | 479 | ctx->space_used += len; |
| 480 | |
| 481 | spin_unlock(&cil->xc_cil_lock); |
Dave Chinner | 3b93c7a | 2010-08-24 11:45:53 +1000 | [diff] [blame] | 482 | } |
| 483 | |
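| | /* Walk a log vector chain and free each formatted buffer in it. */ |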
| 484 | static void |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 485 | xlog_cil_free_logvec( |
| 486 | struct xfs_log_vec *log_vector) |
| 487 | { |
| 488 | struct xfs_log_vec *lv; |
| 489 | |
| 490 | for (lv = log_vector; lv; ) { |
| 491 | struct xfs_log_vec *next = lv->lv_next; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 492 | kmem_free(lv); |
| 493 | lv = next; |
| 494 | } |
| 495 | } |
| 496 | |
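| | /* |
| | * Deferred discard completion: clear the checkpoint's busy extents now that |
| | * the discards have finished, then free the checkpoint context. This runs |
| | * from xfs_discard_wq so we can take the non-IRQ-safe pagb_lock. |
| | */ |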
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 497 | static void |
| 498 | xlog_discard_endio_work( |
| 499 | struct work_struct *work) |
| 500 | { |
| 501 | struct xfs_cil_ctx *ctx = |
| 502 | container_of(work, struct xfs_cil_ctx, discard_endio_work); |
| 503 | struct xfs_mount *mp = ctx->cil->xc_log->l_mp; |
| 504 | |
| 505 | xfs_extent_busy_clear(mp, &ctx->busy_extents, false); |
| 506 | kmem_free(ctx); |
| 507 | } |
| 508 | |
| 509 | /* |
| 510 | * Queue up the actual completion to a thread to avoid IRQ-safe locking for |
| 511 | * pagb_lock. Note that we need an unbounded workqueue, otherwise we might |
| 512 | * get the execution delayed up to 30 seconds for weird reasons. |
| 513 | */ |
| 514 | static void |
| 515 | xlog_discard_endio( |
| 516 | struct bio *bio) |
| 517 | { |
| 518 | struct xfs_cil_ctx *ctx = bio->bi_private; |
| 519 | |
| 520 | INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); |
| 521 | queue_work(xfs_discard_wq, &ctx->discard_endio_work); |
| 522 | } |
| 523 | |
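| | /* |
| | * Issue discards for the busy extents attached to this checkpoint context. |
| | * The context is completed and freed from xlog_discard_endio() once the |
| | * chained discard bios finish; if no bios were issued, complete it inline. |
| | */ |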
| 524 | static void |
| 525 | xlog_discard_busy_extents( |
| 526 | struct xfs_mount *mp, |
| 527 | struct xfs_cil_ctx *ctx) |
| 528 | { |
| 529 | struct list_head *list = &ctx->busy_extents; |
| 530 | struct xfs_extent_busy *busyp; |
| 531 | struct bio *bio = NULL; |
| 532 | struct blk_plug plug; |
| 533 | int error = 0; |
| 534 | |
| 535 | ASSERT(mp->m_flags & XFS_MOUNT_DISCARD); |
| 536 | |
| 537 | blk_start_plug(&plug); |
| 538 | list_for_each_entry(busyp, list, list) { |
| 539 | trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, |
| 540 | busyp->length); |
| 541 | |
| 542 | error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, |
| 543 | XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), |
| 544 | XFS_FSB_TO_BB(mp, busyp->length), |
| 545 | GFP_NOFS, 0, &bio); |
| 546 | if (error && error != -EOPNOTSUPP) { |
| 547 | xfs_info(mp, |
| 548 | "discard failed for extent [0x%llx,%u], error %d", |
| 549 | (unsigned long long)busyp->bno, |
| 550 | busyp->length, |
| 551 | error); |
| 552 | break; |
| 553 | } |
| 554 | } |
| 555 | |
| 556 | if (bio) { |
| 557 | bio->bi_private = ctx; |
| 558 | bio->bi_end_io = xlog_discard_endio; |
| 559 | submit_bio(bio); |
| 560 | } else { |
| 561 | xlog_discard_endio_work(&ctx->discard_endio_work); |
| 562 | } |
| 563 | blk_finish_plug(&plug); |
| 564 | } |
| 565 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 566 | /* |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 567 | * Mark all items committed and clear busy extents. We free the log vector |
| 568 | * chains in a separate pass so that we unpin the log items as quickly as |
| 569 | * possible. |
| 570 | */ |
| 571 | static void |
| 572 | xlog_cil_committed( |
| 573 | void *args, |
| 574 | int abort) |
| 575 | { |
| 576 | struct xfs_cil_ctx *ctx = args; |
Christoph Hellwig | e84661a | 2011-05-20 13:45:32 +0000 | [diff] [blame] | 577 | struct xfs_mount *mp = ctx->cil->xc_log->l_mp; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 578 | |
Dave Chinner | 0e57f6a | 2010-12-20 12:02:19 +1100 | [diff] [blame] | 579 | xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, |
| 580 | ctx->start_lsn, abort); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 581 | |
Dave Chinner | 4ecbfe6 | 2012-04-29 10:41:10 +0000 | [diff] [blame] | 582 | xfs_extent_busy_sort(&ctx->busy_extents); |
| 583 | xfs_extent_busy_clear(mp, &ctx->busy_extents, |
Christoph Hellwig | e84661a | 2011-05-20 13:45:32 +0000 | [diff] [blame] | 584 | (mp->m_flags & XFS_MOUNT_DISCARD) && !abort); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 585 | |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 586 | /* |
| 587 | * If we are aborting the commit, wake up anyone waiting on the |
| 588 | * committing list. If we don't, then a shutdown can leave processes |
| 589 | * blocked in xlog_cil_force_lsn(), waiting on a sequence commit that |
| 590 | * will never happen because we aborted it. |
| 591 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 592 | spin_lock(&ctx->cil->xc_push_lock); |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 593 | if (abort) |
| 594 | wake_up_all(&ctx->cil->xc_commit_wait); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 595 | list_del(&ctx->committing); |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 596 | spin_unlock(&ctx->cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 597 | |
| 598 | xlog_cil_free_logvec(ctx->lv_chain); |
Christoph Hellwig | e84661a | 2011-05-20 13:45:32 +0000 | [diff] [blame] | 599 | |
Christoph Hellwig | 4560e78 | 2017-02-07 14:07:58 -0800 | [diff] [blame] | 600 | if (!list_empty(&ctx->busy_extents)) |
| 601 | xlog_discard_busy_extents(mp, ctx); |
| 602 | else |
| 603 | kmem_free(ctx); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 604 | } |
| 605 | |
| 606 | /* |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 607 | * Push the Committed Item List to the log. If @push_seq flag is zero, then it |
| 608 | * is a background flush and so we can choose to ignore it. Otherwise, if the |
| 609 | * current sequence is the same as @push_seq we need to do a flush. If |
| 610 | * @push_seq is less than the current sequence, then it has already been |
| 611 | * flushed and we don't need to do anything - the caller will wait for it to |
| 612 | * complete if necessary. |
| 613 | * |
| 614 | * @push_seq is a value rather than a flag because that allows us to do an |
| 615 | * unlocked check of the sequence number for a match. Hence we can allow log |
| 616 | * forces to run racily and not issue pushes for the same sequence twice. If we |
| 617 | * get a race between multiple pushes for the same sequence they will block on |
| 618 | * the first one and then abort, hence avoiding needless pushes. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 619 | */ |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 620 | STATIC int |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 621 | xlog_cil_push( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 622 | struct xlog *log) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 623 | { |
| 624 | struct xfs_cil *cil = log->l_cilp; |
| 625 | struct xfs_log_vec *lv; |
| 626 | struct xfs_cil_ctx *ctx; |
| 627 | struct xfs_cil_ctx *new_ctx; |
| 628 | struct xlog_in_core *commit_iclog; |
| 629 | struct xlog_ticket *tic; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 630 | int num_iovecs; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 631 | int error = 0; |
| 632 | struct xfs_trans_header thdr; |
| 633 | struct xfs_log_iovec lhdr; |
| 634 | struct xfs_log_vec lvhdr = { NULL }; |
| 635 | xfs_lsn_t commit_lsn; |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 636 | xfs_lsn_t push_seq; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 637 | |
| 638 | if (!cil) |
| 639 | return 0; |
| 640 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 641 | new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS); |
| 642 | new_ctx->ticket = xlog_cil_ticket_alloc(log); |
| 643 | |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 644 | down_write(&cil->xc_ctx_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 645 | ctx = cil->xc_ctx; |
| 646 | |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 647 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 648 | push_seq = cil->xc_push_seq; |
| 649 | ASSERT(push_seq <= ctx->sequence); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 650 | |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 651 | /* |
| 652 | * Check if we've anything to push. If there is nothing, then we don't |
| 653 | * move on to a new sequence number and so we have to be able to push |
| 654 | * this sequence again later. |
| 655 | */ |
| 656 | if (list_empty(&cil->xc_cil)) { |
| 657 | cil->xc_push_seq = 0; |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 658 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 659 | goto out_skip; |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 660 | } |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 661 | |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 662 | |
| 663 | /* check for a previously pushed sequence */ |
Dave Chinner | 8af3dcd | 2014-09-23 15:57:59 +1000 | [diff] [blame] | 664 | if (push_seq < cil->xc_ctx->sequence) { |
| 665 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | df80615 | 2010-05-17 15:52:13 +1000 | [diff] [blame] | 666 | goto out_skip; |
Dave Chinner | 8af3dcd | 2014-09-23 15:57:59 +1000 | [diff] [blame] | 667 | } |
| 668 | |
| 669 | /* |
| 670 | * We are now going to push this context, so add it to the committing |
| 671 | * list before we do anything else. This ensures that anyone waiting on |
| 672 | * this push can easily detect the difference between a "push in |
| 673 | * progress" and "CIL is empty, nothing to do". |
| 674 | * |
| 675 | * IOWs, a wait loop can now check for: |
| 676 | * the current sequence not being found on the committing list; |
| 677 | * an empty CIL; and |
| 678 | * an unchanged sequence number |
| 679 | * to detect a push that had nothing to do and therefore does not need |
| 680 | * waiting on. If the CIL is not empty, we get put on the committing |
| 681 | * list before emptying the CIL and bumping the sequence number. Hence |
| 682 | * an empty CIL and an unchanged sequence number means we jumped out |
| 683 | * above after doing nothing. |
| 684 | * |
| 685 | * Hence the waiter will either find the commit sequence on the |
| 686 | * committing list or the sequence number will be unchanged and the CIL |
| 687 | * still dirty. In that latter case, the push has not yet started, and |
| 688 | * so the waiter will have to continue trying to check the CIL |
| 689 | * committing list until it is found. In extreme cases of delay, the |
| 690 | * sequence may fully commit between the attempts the waiter makes to wait |
| 691 | * on the commit sequence. |
| 692 | */ |
| 693 | list_add(&ctx->committing, &cil->xc_committing); |
| 694 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | df80615 | 2010-05-17 15:52:13 +1000 | [diff] [blame] | 695 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 696 | /* |
| 697 | * pull all the log vectors off the items in the CIL, and |
| 698 | * remove the items from the CIL. We don't need the CIL lock |
| 699 | * here because it's only needed on the transaction commit |
| 700 | * side which is currently locked out by the flush lock. |
| 701 | */ |
| 702 | lv = NULL; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 703 | num_iovecs = 0; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 704 | while (!list_empty(&cil->xc_cil)) { |
| 705 | struct xfs_log_item *item; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 706 | |
| 707 | item = list_first_entry(&cil->xc_cil, |
| 708 | struct xfs_log_item, li_cil); |
| 709 | list_del_init(&item->li_cil); |
| 710 | if (!ctx->lv_chain) |
| 711 | ctx->lv_chain = item->li_lv; |
| 712 | else |
| 713 | lv->lv_next = item->li_lv; |
| 714 | lv = item->li_lv; |
| 715 | item->li_lv = NULL; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 716 | num_iovecs += lv->lv_niovecs; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 717 | } |
| 718 | |
| 719 | /* |
| 720 | * initialise the new context and attach it to the CIL. Then attach |
| 721 | * the current context to the CIL committing list so it can be found |
| 722 | * during log forces to extract the commit lsn of the sequence that |
| 723 | * needs to be forced. |
| 724 | */ |
| 725 | INIT_LIST_HEAD(&new_ctx->committing); |
| 726 | INIT_LIST_HEAD(&new_ctx->busy_extents); |
| 727 | new_ctx->sequence = ctx->sequence + 1; |
| 728 | new_ctx->cil = cil; |
| 729 | cil->xc_ctx = new_ctx; |
| 730 | |
| 731 | /* |
| 732 | * The switch is now done, so we can drop the context lock and move out |
| 733 | * of a shared context. We can't just go straight to the commit record, |
| 734 | * though - we need to synchronise with previous and future commits so |
| 735 | * that the commit records are correctly ordered in the log to ensure |
| 736 | * that we process items during log IO completion in the correct order. |
| 737 | * |
| 738 | * For example, if we get an EFI in one checkpoint and the EFD in the |
| 739 | * next (e.g. due to log forces), we do not want the checkpoint with |
| 740 | * the EFD to be committed before the checkpoint with the EFI. Hence |
| 741 | * we must strictly order the commit records of the checkpoints so |
| 742 | * that: a) the checkpoint callbacks are attached to the iclogs in the |
| 743 | * correct order; and b) the checkpoints are replayed in correct order |
| 744 | * in log recovery. |
| 745 | * |
| 746 | * Hence we need to add this context to the committing context list so |
| 747 | * that higher sequences will wait for us to write out a commit record |
| 748 | * before they do. |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 749 | * |
| 750 | * xfs_log_force_lsn requires us to mirror the new sequence into the cil |
| 751 | * structure atomically with the addition of this sequence to the |
| 752 | * committing list. This also ensures that we can do unlocked checks |
| 753 | * against the current sequence in log forces without risking |
| 754 | * dereferencing a freed context pointer. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 755 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 756 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 757 | cil->xc_current_sequence = new_ctx->sequence; |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 758 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 759 | up_write(&cil->xc_ctx_lock); |
| 760 | |
| 761 | /* |
| 762 | * Build a checkpoint transaction header and write it to the log to |
| 763 | * begin the transaction. We need to account for the space used by the |
| 764 | * transaction header here as it is not accounted for in xlog_write(). |
| 765 | * |
| 766 | * The LSN we need to pass to the log items on transaction commit is |
| 767 | * the LSN reported by the first log vector write. If we use the commit |
| 768 | * record lsn then we can move the tail beyond the grant write head. |
| 769 | */ |
| 770 | tic = ctx->ticket; |
| 771 | thdr.th_magic = XFS_TRANS_HEADER_MAGIC; |
| 772 | thdr.th_type = XFS_TRANS_CHECKPOINT; |
| 773 | thdr.th_tid = tic->t_tid; |
| 774 | thdr.th_num_items = num_iovecs; |
Christoph Hellwig | 4e0d5f9 | 2010-06-23 18:11:15 +1000 | [diff] [blame] | 775 | lhdr.i_addr = &thdr; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 776 | lhdr.i_len = sizeof(xfs_trans_header_t); |
| 777 | lhdr.i_type = XLOG_REG_TYPE_TRANSHDR; |
| 778 | tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t); |
| 779 | |
| 780 | lvhdr.lv_niovecs = 1; |
| 781 | lvhdr.lv_iovecp = &lhdr; |
| 782 | lvhdr.lv_next = ctx->lv_chain; |
| 783 | |
| 784 | error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0); |
| 785 | if (error) |
Dave Chinner | 7db37c5 | 2011-01-27 12:02:00 +1100 | [diff] [blame] | 786 | goto out_abort_free_ticket; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 787 | |
| 788 | /* |
| 789 | * now that we've written the checkpoint into the log, strictly |
| 790 | * order the commit records so replay will get them in the right order. |
| 791 | */ |
| 792 | restart: |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 793 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 794 | list_for_each_entry(new_ctx, &cil->xc_committing, committing) { |
| 795 | /* |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 796 | * Avoid getting stuck in this loop because we were woken by the |
| 797 | * shutdown, but then went back to sleep once already in the |
| 798 | * shutdown state. |
| 799 | */ |
| 800 | if (XLOG_FORCED_SHUTDOWN(log)) { |
| 801 | spin_unlock(&cil->xc_push_lock); |
| 802 | goto out_abort_free_ticket; |
| 803 | } |
| 804 | |
| 805 | /* |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 806 | * Higher sequences will wait for this one so skip them. |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 807 | * Don't wait for our own sequence, either. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 808 | */ |
| 809 | if (new_ctx->sequence >= ctx->sequence) |
| 810 | continue; |
| 811 | if (!new_ctx->commit_lsn) { |
| 812 | /* |
| 813 | * It is still being pushed! Wait for the push to |
| 814 | * complete, then start again from the beginning. |
| 815 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 816 | xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 817 | goto restart; |
| 818 | } |
| 819 | } |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 820 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 821 | |
Dave Chinner | 7db37c5 | 2011-01-27 12:02:00 +1100 | [diff] [blame] | 822 | /* xfs_log_done always frees the ticket on error. */ |
Christoph Hellwig | f78c390 | 2015-06-04 13:48:20 +1000 | [diff] [blame] | 823 | commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, false); |
Dave Chinner | 7db37c5 | 2011-01-27 12:02:00 +1100 | [diff] [blame] | 824 | if (commit_lsn == -1) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 825 | goto out_abort; |
| 826 | |
| 827 | /* attach all the transactions w/ busy extents to iclog */ |
| 828 | ctx->log_cb.cb_func = xlog_cil_committed; |
| 829 | ctx->log_cb.cb_arg = ctx; |
| 830 | error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb); |
| 831 | if (error) |
| 832 | goto out_abort; |
| 833 | |
| 834 | /* |
| 835 | * now the checkpoint commit is complete and we've attached the |
| 836 | * callbacks to the iclog we can assign the commit LSN to the context |
| 837 | * and wake up anyone who is waiting for the commit to complete. |
| 838 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 839 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 840 | ctx->commit_lsn = commit_lsn; |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 841 | wake_up_all(&cil->xc_commit_wait); |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 842 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 843 | |
| 844 | /* release the hounds! */ |
| 845 | return xfs_log_release_iclog(log->l_mp, commit_iclog); |
| 846 | |
| 847 | out_skip: |
| 848 | up_write(&cil->xc_ctx_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 849 | xfs_log_ticket_put(new_ctx->ticket); |
| 850 | kmem_free(new_ctx); |
| 851 | return 0; |
| 852 | |
Dave Chinner | 7db37c5 | 2011-01-27 12:02:00 +1100 | [diff] [blame] | 853 | out_abort_free_ticket: |
| 854 | xfs_log_ticket_put(tic); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 855 | out_abort: |
| 856 | xlog_cil_committed(ctx, XFS_LI_ABORTED); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 857 | return -EIO; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 858 | } |
| 859 | |
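| | /* Work function for the CIL push workqueue: push the CIL that queued us. */ |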
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 860 | static void |
| 861 | xlog_cil_push_work( |
| 862 | struct work_struct *work) |
| 863 | { |
| 864 | struct xfs_cil *cil = container_of(work, struct xfs_cil, |
| 865 | xc_push_work); |
| 866 | xlog_cil_push(cil->xc_log); |
| 867 | } |
| 868 | |
| 869 | /* |
| 870 | * We need to push CIL every so often so we don't cache more than we can fit in |
| 871 | * the log. The limit really is that a checkpoint can't be more than half the |
| 872 | * log (the current checkpoint is not allowed to overwrite the previous |
| 873 | * checkpoint), but commit latency and memory usage limit this to a smaller |
| 874 | * size. |
| 875 | */ |
| 876 | static void |
| 877 | xlog_cil_push_background( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 878 | struct xlog *log) |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 879 | { |
| 880 | struct xfs_cil *cil = log->l_cilp; |
| 881 | |
| 882 | /* |
| 883 | * The cil won't be empty because we are called while holding the |
| 884 | * context lock so whatever we added to the CIL will still be there |
| 885 | */ |
| 886 | ASSERT(!list_empty(&cil->xc_cil)); |
| 887 | |
| 888 | /* |
| 889 | * don't do a background push if we haven't used up all the |
| 890 | * space available yet. |
| 891 | */ |
| 892 | if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) |
| 893 | return; |
| 894 | |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 895 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 896 | if (cil->xc_push_seq < cil->xc_current_sequence) { |
| 897 | cil->xc_push_seq = cil->xc_current_sequence; |
| 898 | queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work); |
| 899 | } |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 900 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 901 | |
| 902 | } |
| 903 | |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 904 | /* |
| 905 | * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence |
| 906 | * number that is passed. When it returns, the work will be queued for |
| 907 | * @push_seq, but it won't be completed. The caller is expected to do any |
| 908 | * waiting for push_seq to complete if it is required. |
| 909 | */ |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 910 | static void |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 911 | xlog_cil_push_now( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 912 | struct xlog *log, |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 913 | xfs_lsn_t push_seq) |
| 914 | { |
| 915 | struct xfs_cil *cil = log->l_cilp; |
| 916 | |
| 917 | if (!cil) |
| 918 | return; |
| 919 | |
| 920 | ASSERT(push_seq && push_seq <= cil->xc_current_sequence); |
| 921 | |
| 922 | /* start on any pending background push to minimise wait time on it */ |
| 923 | flush_work(&cil->xc_push_work); |
| 924 | |
| 925 | /* |
| 926 | * If the CIL is empty or we've already pushed the sequence then |
| 927 | * there's no work we need to do. |
| 928 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 929 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 930 | if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) { |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 931 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 932 | return; |
| 933 | } |
| 934 | |
| 935 | cil->xc_push_seq = push_seq; |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 936 | queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work); |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 937 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 938 | } |
| 939 | |
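| | /* Report whether the CIL currently has no items waiting to be pushed. */ |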
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 940 | bool |
| 941 | xlog_cil_empty( |
| 942 | struct xlog *log) |
| 943 | { |
| 944 | struct xfs_cil *cil = log->l_cilp; |
| 945 | bool empty = false; |
| 946 | |
| 947 | spin_lock(&cil->xc_push_lock); |
| 948 | if (list_empty(&cil->xc_cil)) |
| 949 | empty = true; |
| 950 | spin_unlock(&cil->xc_push_lock); |
| 951 | return empty; |
| 952 | } |
| 953 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 954 | /* |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 955 | * Commit a transaction with the given vector to the Committed Item List. |
| 956 | * |
| 957 | * To do this, we need to format the item, pin it in memory if required and |
| 958 | * account for the space used by the transaction. Once we have done that we |
| 959 | * need to release the unused reservation for the transaction, attach the |
| 960 | * transaction to the checkpoint context so we carry the busy extents through |
| 961 | * to checkpoint completion, and then unlock all the items in the transaction. |
| 962 | * |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 963 | * Called with the context lock already held in read mode to lock out |
| 964 | * background commit, returns without it held once background commits are |
| 965 | * allowed again. |
| 966 | */ |
Jie Liu | c6f9726 | 2014-02-07 15:26:07 +1100 | [diff] [blame] | 967 | void |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 968 | xfs_log_commit_cil( |
| 969 | struct xfs_mount *mp, |
| 970 | struct xfs_trans *tp, |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 971 | xfs_lsn_t *commit_lsn, |
Christoph Hellwig | 7039331 | 2015-06-04 13:48:08 +1000 | [diff] [blame] | 972 | bool regrant) |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 973 | { |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 974 | struct xlog *log = mp->m_log; |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 975 | struct xfs_cil *cil = log->l_cilp; |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 976 | |
Dave Chinner | b1c5ebb | 2016-07-22 09:52:35 +1000 | [diff] [blame] | 977 | /* |
| 978 | * Do all necessary memory allocation before we lock the CIL. |
| 979 | * This ensures the allocation does not deadlock with a CIL |
| 980 | * push in memory reclaim (e.g. from kswapd). |
| 981 | */ |
| 982 | xlog_cil_alloc_shadow_bufs(log, tp); |
| 983 | |
Dave Chinner | f5baac3 | 2013-08-12 20:50:06 +1000 | [diff] [blame] | 984 | /* lock out background commit */ |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 985 | down_read(&cil->xc_ctx_lock); |
Dave Chinner | f5baac3 | 2013-08-12 20:50:06 +1000 | [diff] [blame] | 986 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 987 | xlog_cil_insert_items(log, tp); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 988 | |
| 989 | /* check we didn't blow the reservation */ |
| 990 | if (tp->t_ticket->t_curr_res < 0) |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 991 | xlog_print_tic_res(mp, tp->t_ticket); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 992 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 993 | tp->t_commit_lsn = cil->xc_ctx->sequence; |
| 994 | if (commit_lsn) |
| 995 | *commit_lsn = tp->t_commit_lsn; |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 996 | |
Christoph Hellwig | f78c390 | 2015-06-04 13:48:20 +1000 | [diff] [blame] | 997 | xfs_log_done(mp, tp->t_ticket, NULL, regrant); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 998 | xfs_trans_unreserve_and_mod_sb(tp); |
| 999 | |
| 1000 | /* |
| 1001 | * Once all the items of the transaction have been copied to the CIL, |
| 1002 | * the items can be unlocked and freed. |
| 1003 | * |
| 1004 | * This needs to be done before we drop the CIL context lock because we |
| 1005 | * have to update state in the log items and unlock them before they go |
| 1006 | * to disk. If we don't, then the CIL checkpoint can race with us and |
| 1007 | * we can run checkpoint completion before we've updated and unlocked |
| 1008 | * the log items. This affects (at least) processing of stale buffers, |
| 1009 | * inodes and EFIs. |
| 1010 | */ |
Christoph Hellwig | eacb24e | 2015-06-04 13:47:43 +1000 | [diff] [blame] | 1011 | xfs_trans_free_items(tp, tp->t_commit_lsn, false); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1012 | |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1013 | xlog_cil_push_background(log); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1014 | |
Dave Chinner | 991aaf6 | 2013-08-12 20:50:07 +1000 | [diff] [blame] | 1015 | up_read(&cil->xc_ctx_lock); |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1016 | } |
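A minimal caller sketch may help show how this is used: the transaction commit path hands the finished transaction to the CIL and, for a synchronous commit, then forces the log up to the returned commit LSN (which is really the CIL sequence set above). The helper name below is hypothetical, the _xfs_log_force_lsn() call is assumed from kernels of roughly this vintage, and the usual xfs headers are assumed.

/* Hypothetical sketch - not part of xfs_log_cil.c. */
static int
example_commit_sync(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp)
{
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	/* move the logged items into the CIL and release the log ticket */
	xfs_log_commit_cil(mp, tp, &commit_lsn, false);

	/* a synchronous commit then waits for that sequence to become stable */
	return _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
}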
| 1017 | |
| 1018 | /* |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1019 | * Conditionally push the CIL based on the sequence passed in. |
| 1020 | * |
| 1021 | * We only need to push if we haven't already pushed the sequence |
| 1022 | * number given. Hence the only time we will trigger a push here is |
| 1023 | * if the push sequence is the same as the current context's sequence. |
| 1024 | * |
| 1025 | * We return the current commit lsn to allow the callers to determine if an |
| 1026 | * iclog flush is necessary following this call. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1027 | */ |
| 1028 | xfs_lsn_t |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1029 | xlog_cil_force_lsn( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1030 | struct xlog *log, |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1031 | xfs_lsn_t sequence) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1032 | { |
| 1033 | struct xfs_cil *cil = log->l_cilp; |
| 1034 | struct xfs_cil_ctx *ctx; |
| 1035 | xfs_lsn_t commit_lsn = NULLCOMMITLSN; |
| 1036 | |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1037 | ASSERT(sequence <= cil->xc_current_sequence); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1038 | |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1039 | /* |
| 1040 | * check to see if we need to force out the current context. |
| 1041 | * xlog_cil_push() handles racing pushes for the same sequence, |
| 1042 | * so no need to deal with it here. |
| 1043 | */ |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 1044 | restart: |
| 1045 | xlog_cil_push_now(log, sequence); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1046 | |
| 1047 | /* |
| 1048 | * See if we can find a previous sequence still committing. |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1049 | * We need to wait for all previous sequence commits to complete |
| 1050 | * before allowing the force of push_seq to go ahead. Hence block |
| 1051 | * on commits for those as well. |
| 1052 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 1053 | spin_lock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1054 | list_for_each_entry(ctx, &cil->xc_committing, committing) { |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 1055 | /* |
| 1056 | * Avoid getting stuck in this loop because we were woken by the |
| 1057 | * shutdown, but then went back to sleep once already in the |
| 1058 | * shutdown state. |
| 1059 | */ |
| 1060 | if (XLOG_FORCED_SHUTDOWN(log)) |
| 1061 | goto out_shutdown; |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1062 | if (ctx->sequence > sequence) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1063 | continue; |
| 1064 | if (!ctx->commit_lsn) { |
| 1065 | /* |
| 1066 | * It is still being pushed! Wait for the push to |
| 1067 | * complete, then start again from the beginning. |
| 1068 | */ |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 1069 | xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1070 | goto restart; |
| 1071 | } |
Dave Chinner | a44f13e | 2010-08-24 11:40:03 +1000 | [diff] [blame] | 1072 | if (ctx->sequence != sequence) |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1073 | continue; |
| 1074 | /* found it! */ |
| 1075 | commit_lsn = ctx->commit_lsn; |
| 1076 | } |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 1077 | |
| 1078 | /* |
| 1079 | * The call to xlog_cil_push_now() executes the push in the background. |
| 1080 | * Hence by the time we get here, our sequence may not have been |
| 1081 | * pushed yet. This is true if the current sequence still matches the |
| 1082 | * push sequence after the above wait loop and the CIL still contains |
Dave Chinner | 8af3dcd | 2014-09-23 15:57:59 +1000 | [diff] [blame] | 1083 | * dirty objects. This is guaranteed by the push code first adding the |
| 1084 | * context to the committing list before emptying the CIL. |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 1085 | * |
Dave Chinner | 8af3dcd | 2014-09-23 15:57:59 +1000 | [diff] [blame] | 1086 | * Hence if we don't find the context in the committing list and the |
| 1087 | * current sequence number is unchanged then the CIL contents are |
| 1088 | * significant. If the CIL is empty, it means there was nothing to push |
| 1089 | * and that means there is nothing to wait for. If the CIL is not empty, |
| 1090 | * it means we haven't yet started the push, because if it had started |
| 1091 | * we would have found the context on the committing list. |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 1092 | */ |
Dave Chinner | f876e44 | 2014-02-27 16:40:42 +1100 | [diff] [blame] | 1093 | if (sequence == cil->xc_current_sequence && |
| 1094 | !list_empty(&cil->xc_cil)) { |
| 1095 | spin_unlock(&cil->xc_push_lock); |
| 1096 | goto restart; |
| 1097 | } |
| 1098 | |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 1099 | spin_unlock(&cil->xc_push_lock); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1100 | return commit_lsn; |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 1101 | |
| 1102 | /* |
| 1103 | * We detected a shutdown in progress. We need to trigger the log force |
| 1104 | * to pass through its iclog state machine error handling, even though |
| 1105 | * we are already in a shutdown state. Hence we can't return |
| 1106 | * NULLCOMMITLSN here as that has special meaning to log forces (i.e. |
| 1107 | * LSN is already stable), so we return a zero LSN instead. |
| 1108 | */ |
| 1109 | out_shutdown: |
| 1110 | spin_unlock(&cil->xc_push_lock); |
| 1111 | return 0; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1112 | } |
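As a rough sketch of how callers are expected to consume the return value (the helper name is hypothetical): a log force to a CIL sequence checks the returned commit LSN to decide whether an iclog flush is still required.

/* Hypothetical sketch - not part of xfs_log_cil.c. */
static bool
example_need_iclog_flush(
	struct xlog		*log,
	xfs_lsn_t		sequence)
{
	xfs_lsn_t		commit_lsn = xlog_cil_force_lsn(log, sequence);

	/*
	 * NULLCOMMITLSN means the sequence is already stable, so there is
	 * nothing left to flush; anything else (including the zero LSN
	 * returned on shutdown) still has to go through the iclog force.
	 */
	return commit_lsn != NULLCOMMITLSN;
}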
Dave Chinner | ccf7c23 | 2010-05-20 23:19:42 +1000 | [diff] [blame] | 1113 | |
| 1114 | /* |
| 1115 | * Check if the current log item was first committed in this sequence. |
| 1116 | * We can't rely on just the log item being in the CIL; we have to check |
| 1117 | * the recorded commit sequence number. |
| 1118 | * |
| 1119 | * Note: for this to be used in a non-racy manner, it has to be called with |
| 1120 | * CIL flushing locked out. As a result, it should only be used during the |
| 1121 | * transaction commit process when deciding what to format into the item. |
| 1122 | */ |
| 1123 | bool |
| 1124 | xfs_log_item_in_current_chkpt( |
| 1125 | struct xfs_log_item *lip) |
| 1126 | { |
| 1127 | struct xfs_cil_ctx *ctx; |
| 1128 | |
Dave Chinner | ccf7c23 | 2010-05-20 23:19:42 +1000 | [diff] [blame] | 1129 | if (list_empty(&lip->li_cil)) |
| 1130 | return false; |
| 1131 | |
| 1132 | ctx = lip->li_mountp->m_log->l_cilp->xc_ctx; |
| 1133 | |
| 1134 | /* |
| 1135 | * li_seq is written on the first commit of a log item to record the |
| 1136 | * first checkpoint it is written to. Hence if it is different to the |
| 1137 | * current sequence, we're in a new checkpoint. |
| 1138 | */ |
| 1139 | if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0) |
| 1140 | return false; |
| 1141 | return true; |
| 1142 | } |
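A hedged sketch of the intended usage (the callback below is hypothetical): during transaction commit, with CIL flushing locked out, the result simply distinguishes an item that is already dirty in the current checkpoint from one entering a new checkpoint, which can drive what gets formatted into it.

/* Hypothetical sketch - not part of xfs_log_cil.c. */
static void
example_decide_format(
	struct xfs_log_item	*lip)
{
	if (xfs_log_item_in_current_chkpt(lip)) {
		/* item was first committed in the current checkpoint */
	} else {
		/* item is being committed into a new checkpoint */
	}
}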
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1143 | |
| 1144 | /* |
| 1145 | * Set up the CIL and its initial checkpoint context when the log is allocated. |
| 1146 | */ |
| 1147 | int |
| 1148 | xlog_cil_init( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1149 | struct xlog *log) |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1150 | { |
| 1151 | struct xfs_cil *cil; |
| 1152 | struct xfs_cil_ctx *ctx; |
| 1153 | |
| 1154 | cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); |
| 1155 | if (!cil) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1156 | return -ENOMEM; |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1157 | |
| 1158 | ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); |
| 1159 | if (!ctx) { |
| 1160 | kmem_free(cil); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1161 | return -ENOMEM; |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1162 | } |
| 1163 | |
| 1164 | INIT_WORK(&cil->xc_push_work, xlog_cil_push_work); |
| 1165 | INIT_LIST_HEAD(&cil->xc_cil); |
| 1166 | INIT_LIST_HEAD(&cil->xc_committing); |
| 1167 | spin_lock_init(&cil->xc_cil_lock); |
Dave Chinner | 4bb928c | 2013-08-12 20:50:08 +1000 | [diff] [blame] | 1168 | spin_lock_init(&cil->xc_push_lock); |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1169 | init_rwsem(&cil->xc_ctx_lock); |
| 1170 | init_waitqueue_head(&cil->xc_commit_wait); |
| 1171 | |
| 1172 | INIT_LIST_HEAD(&ctx->committing); |
| 1173 | INIT_LIST_HEAD(&ctx->busy_extents); |
| 1174 | ctx->sequence = 1; |
| 1175 | ctx->cil = cil; |
| 1176 | cil->xc_ctx = ctx; |
| 1177 | cil->xc_current_sequence = ctx->sequence; |
| 1178 | |
| 1179 | cil->xc_log = log; |
| 1180 | log->l_cilp = cil; |
| 1181 | return 0; |
| 1182 | } |
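For completeness, a small sketch of the expected pairing (the helper name is hypothetical): the CIL is initialised once when the log structure is set up and torn down again when the log is freed; allocation failure (-ENOMEM) is the only error case.

/* Hypothetical sketch - not part of xfs_log_cil.c. */
static int
example_log_cil_setup_teardown(
	struct xlog		*log)
{
	int			error;

	error = xlog_cil_init(log);
	if (error)
		return error;		/* -ENOMEM */

	/* ... the log is used for the life of the mount ... */

	xlog_cil_destroy(log);
	return 0;
}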
| 1183 | |
| 1184 | void |
| 1185 | xlog_cil_destroy( |
Mark Tinguely | f7bdf03 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1186 | struct xlog *log) |
Dave Chinner | 4c2d542 | 2012-04-23 17:54:32 +1000 | [diff] [blame] | 1187 | { |
| 1188 | if (log->l_cilp->xc_ctx) { |
| 1189 | if (log->l_cilp->xc_ctx->ticket) |
| 1190 | xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket); |
| 1191 | kmem_free(log->l_cilp->xc_ctx); |
| 1192 | } |
| 1193 | |
| 1194 | ASSERT(list_empty(&log->l_cilp->xc_cil)); |
| 1195 | kmem_free(log->l_cilp); |
| 1196 | } |
| 1197 | |