/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"
#include "xfs_cksum.h"
#include "xfs_sysfs.h"
#include "xfs_sb.h"

kmem_zone_t	*xfs_log_ticket_zone;

/* Local miscellaneous function prototypes */
STATIC int
xlog_commit_record(
	struct xlog		*log,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	xfs_lsn_t		*commitlsnp);

STATIC struct xlog *
xlog_alloc_log(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*log_target,
	xfs_daddr_t		blk_offset,
	int			num_bblks);
STATIC int
xlog_space_left(
	struct xlog		*log,
	atomic64_t		*head);
STATIC int
xlog_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_dealloc_log(
	struct xlog		*log);

/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
STATIC void
xlog_state_do_callback(
	struct xlog		*log,
	int			aborted,
	struct xlog_in_core	*iclog);
STATIC int
xlog_state_get_iclog_space(
	struct xlog		*log,
	int			len,
	struct xlog_in_core	**iclog,
	struct xlog_ticket	*ticket,
	int			*continued_write,
	int			*logoffsetp);
STATIC int
xlog_state_release_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog);
STATIC void
xlog_state_switch_iclogs(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			eventual_size);
STATIC void
xlog_state_want_sync(
	struct xlog		*log,
	struct xlog_in_core	*iclog);

STATIC void
xlog_grant_push_ail(
	struct xlog		*log,
	int			need_bytes);
STATIC void
xlog_regrant_reserve_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);
STATIC void
xlog_ungrant_log_space(
	struct xlog		*log,
	struct xlog_ticket	*ticket);

#if defined(DEBUG)
STATIC void
xlog_verify_dest_ptr(
	struct xlog		*log,
	void			*ptr);
STATIC void
xlog_verify_grant_tail(
	struct xlog		*log);
STATIC void
xlog_verify_iclog(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	int			count,
	bool			syncing);
STATIC void
xlog_verify_tail_lsn(
	struct xlog		*log,
	struct xlog_in_core	*iclog,
	xfs_lsn_t		tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

STATIC int
xlog_iclogs_empty(
	struct xlog		*log);

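/*
 * Subtract the given number of bytes from a grant head.  The head is a
 * cycle/bytes pair packed into a single atomic64_t, so the update is done
 * with a lockless cmpxchg loop; if the byte count underflows we borrow from
 * the previous cycle.
 */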
static void
xlog_grant_sub_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		space -= bytes;
		if (space < 0) {
			space += log->l_logsize;
			cycle--;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

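/*
 * Add the given number of bytes to a grant head using the same lockless
 * cmpxchg loop; if the byte count runs past the end of the log, the
 * remainder wraps into the next cycle.
 */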
static void
xlog_grant_add_space(
	struct xlog		*log,
	atomic64_t		*head,
	int			bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	tmp;
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);

		tmp = log->l_logsize - space;
		if (tmp > bytes)
			space += bytes;
		else {
			space = bytes - tmp;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}

STATIC void
xlog_grant_head_init(
	struct xlog_grant_head	*head)
{
	xlog_assign_grant_head(&head->grant, 1, 0);
	INIT_LIST_HEAD(&head->waiters);
	spin_lock_init(&head->lock);
}

STATIC void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}

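/*
 * Return the number of bytes this ticket needs from the given grant head:
 * a single unit for the write head; for the reserve head, one unit per
 * remaining count for a permanent reservation, otherwise a single unit.
 */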
static inline int
xlog_ticket_reservation(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic)
{
	if (head == &log->l_write_head) {
		ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
		return tic->t_unit_res;
	} else {
		if (tic->t_flags & XLOG_TIC_PERM_RESERV)
			return tic->t_unit_res * tic->t_cnt;
		else
			return tic->t_unit_res;
	}
}

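/*
 * Wake queued waiters in order while there is enough free space to satisfy
 * them, consuming the space from *free_bytes as we go.  Returns false as
 * soon as a waiter is reached that cannot be satisfied with the space left.
 */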
STATIC bool
xlog_grant_head_wake(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	int			*free_bytes)
{
	struct xlog_ticket	*tic;
	int			need_bytes;

	list_for_each_entry(tic, &head->waiters, t_queue) {
		need_bytes = xlog_ticket_reservation(log, head, tic);
		if (*free_bytes < need_bytes)
			return false;

		*free_bytes -= need_bytes;
		trace_xfs_log_grant_wake_up(log, tic);
		wake_up_process(tic->t_task);
	}

	return true;
}

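/*
 * Queue the ticket on the grant head and sleep until enough space becomes
 * available or the log is shut down.  The AIL is pushed before each sleep to
 * encourage the log tail to move.  Called with head->lock held; the lock is
 * dropped while sleeping and retaken before returning.
 */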
STATIC int
xlog_grant_head_wait(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes) __releases(&head->lock)
					    __acquires(&head->lock)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(log->l_mp, xs_sleep_logspace);

		trace_xfs_log_grant_sleep(log, tic);
		schedule();
		trace_xfs_log_grant_wake(log, tic);

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return -EIO;
}

/*
 * Atomically get the log space required for a log ticket.
 *
 * Once a ticket gets put onto head->waiters, it will only return after the
 * needed reservation is satisfied.
 *
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * path. Hence any lock will be globally hot if we take it unconditionally on
 * every pass.
 *
 * As tickets are only ever moved on and off head->waiters under head->lock, we
 * only need to take that lock if we are going to add the ticket to the queue
 * and sleep. We can avoid taking the lock if the ticket was never added to
 * head->waiters because the t_queue list head will be empty and we hold the
 * only reference to it so it can safely be checked unlocked.
 */
STATIC int
xlog_grant_head_check(
	struct xlog		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			*need_bytes)
{
	int			free_bytes;
	int			error = 0;

	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));

	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us. Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	*need_bytes = xlog_ticket_reservation(log, head, tic);
	free_bytes = xlog_space_left(log, &head->grant);
	if (!list_empty_careful(&head->waiters)) {
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < *need_bytes) {
			error = xlog_grant_head_wait(log, head, tic,
						     *need_bytes);
		}
		spin_unlock(&head->lock);
	} else if (free_bytes < *need_bytes) {
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, *need_bytes);
		spin_unlock(&head->lock);
	}

	return error;
}

static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
	tic->t_res_num = 0;
	tic->t_res_arr_sum = 0;
	tic->t_res_num_ophdrs = 0;
}

static void
xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
{
	if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
		/* add to overflow and start again */
		tic->t_res_o_flow += tic->t_res_arr_sum;
		tic->t_res_num = 0;
		tic->t_res_arr_sum = 0;
	}

	tic->t_res_arr[tic->t_res_num].r_len = len;
	tic->t_res_arr[tic->t_res_num].r_type = type;
	tic->t_res_arr_sum += len;
	tic->t_res_num++;
}

/*
 * Replenish the byte reservation required by moving the grant write head.
 */
int
xfs_log_regrant(
	struct xfs_mount	*mp,
	struct xlog_ticket	*tic)
{
	struct xlog		*log = mp->m_log;
	int			need_bytes;
	int			error = 0;

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	/*
	 * This is a new transaction on the ticket, so we need to change the
	 * transaction ID so that the next transaction has a different TID in
	 * the log. Just add one to the existing tid so that we can see chains
	 * of rolling transactions in the log easily.
	 */
	tic->t_tid++;

	xlog_grant_push_ail(log, tic->t_unit_res);

	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);

	if (tic->t_cnt > 0)
		return 0;

	trace_xfs_log_regrant(log, tic);

	error = xlog_grant_head_check(log, &log->l_write_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_regrant_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}

/*
 * Reserve log space and return a ticket corresponding to the reservation.
 *
 * Each reservation is going to reserve extra space for a log record header.
 * When writes happen to the on-disk log, we don't subtract the length of the
 * log record header from any reservation.  By wasting space in each
 * reservation, we prevent over-allocation problems.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticp,
	__uint8_t		client,
	bool			permanent)
{
	struct xlog		*log = mp->m_log;
	struct xlog_ticket	*tic;
	int			need_bytes;
	int			error = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return -EIO;

	XFS_STATS_INC(mp, xs_try_logspace);

	ASSERT(*ticp == NULL);
	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
				KM_SLEEP | KM_MAYFAIL);
	if (!tic)
		return -ENOMEM;

	*ticp = tic;

	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
					    : tic->t_unit_res);

	trace_xfs_log_reserve(log, tic);

	error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
				      &need_bytes);
	if (error)
		goto out_error;

	xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
	xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
	trace_xfs_log_reserve_exit(log, tic);
	xlog_verify_grant_tail(log);
	return 0;

out_error:
	/*
	 * If we are failing, make sure the ticket doesn't have any current
	 * reservations. We don't want to add this back when the ticket/
	 * transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0;	/* ungrant will give back unit_res * t_cnt. */
	return error;
}


/*
 * NOTES:
 *
 *	1. currblock field gets updated at startup and after in-core logs
 *	   are marked with WANT_SYNC.
 */

/*
 * This routine is called when a user of a log manager ticket is done with
 * the reservation.  If the ticket was ever used, then a commit record for
 * the associated transaction is written out as a log operation header with
 * no data.  The flag XLOG_TIC_INITED is set when the first write occurs with
 * a given ticket.  If the ticket was one with a permanent reservation, then
 * a few operations are done differently.  Permanent reservation tickets by
 * default don't release the reservation.  They just commit the current
 * transaction with the belief that the reservation is still needed.  A flag
 * must be passed in before permanent reservations are actually released.
 * When these types of tickets are not released, they need to be set into
 * the inited state again.  By doing this, a start record will be written
 * out when the next write occurs.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	bool			regrant)
{
	struct xlog		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		regrant = false;
	}


	if (!regrant) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/* If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	xfs_log_ticket_put(ticket);
	return lsn;
}

/*
 * Attaches a new iclog I/O completion callback routine during
 * transaction commit.  If the log is in error state, a non-zero
 * return code is handed back and the caller is responsible for
 * executing the callback at an appropriate time.
 */
int
xfs_log_notify(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog,
	xfs_log_callback_t	*cb)
{
	int	abortflg;

	spin_lock(&iclog->ic_callback_lock);
	abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
	if (!abortflg) {
		ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
			      (iclog->ic_state == XLOG_STATE_WANT_SYNC));
		cb->cb_next = NULL;
		*(iclog->ic_callback_tail) = cb;
		iclog->ic_callback_tail = &(cb->cb_next);
	}
	spin_unlock(&iclog->ic_callback_lock);
	return abortflg;
}

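/*
 * Release a reference to an iclog.  If the release reports an error, force
 * a log I/O error shutdown of the filesystem and return -EIO.
 */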
int
xfs_log_release_iclog(
	struct xfs_mount	*mp,
	struct xlog_in_core	*iclog)
{
	if (xlog_state_release_iclog(mp->m_log, iclog)) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		return -EIO;
	}

	return 0;
}

/*
 * Mount a log filesystem
 *
 * mp		- ubiquitous xfs mount point structure
 * log_target	- buftarg of on-disk log device
 * blk_offset	- Start block # where block size is 512 bytes (BBSIZE)
 * num_bblocks	- Number of BBSIZE blocks in on-disk log
 *
 * Return error or zero.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	int		error = 0;
	int		min_logfsbs;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		xfs_notice(mp, "Mounting V%d Filesystem",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
	} else {
		xfs_notice(mp,
"Mounting V%d filesystem in no-recovery mode. Filesystem will be inconsistent.",
			   XFS_SB_VERSION_NUM(&mp->m_sb));
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		error = PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Validate the given log space and drop a critical message via syslog
	 * if the log size is too small, since that would lead to unexpected
	 * situations in the transaction log space reservation stage.
	 *
	 * Note: we can't just reject the mount if the validation fails.  This
	 * would mean that people would have to downgrade their kernel just to
	 * remedy the situation as there is no way to grow the log (short of
	 * black magic surgery with xfs_db).
	 *
	 * We can, however, reject mounts for CRC format filesystems, as the
	 * mkfs binary being used to make the filesystem should never create a
	 * filesystem with a log that is too small.
	 */
	min_logfsbs = xfs_log_calc_minimum_size(mp);

	if (mp->m_sb.sb_logblocks < min_logfsbs) {
		xfs_warn(mp,
		"Log size %d blocks too small, minimum size is %d blocks",
			 mp->m_sb.sb_logblocks, min_logfsbs);
		error = -EINVAL;
	} else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) {
		xfs_warn(mp,
		"Log size %d blocks too large, maximum size is %lld blocks",
			 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS);
		error = -EINVAL;
	} else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) {
		xfs_warn(mp,
		"log size %lld bytes too large, maximum size is %lld bytes",
			 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks),
			 XFS_MAX_LOG_BYTES);
		error = -EINVAL;
	}
	if (error) {
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!");
			ASSERT(0);
			goto out_free_log;
		}
		xfs_crit(mp, "Log size out of supported range.");
		xfs_crit(mp,
"Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				 error);
			xlog_recover_cancel(mp->m_log);
			goto out_destroy_ail;
		}
	}

	error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
			       "log");
	if (error)
		goto out_destroy_ail;

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know where our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}

/*
 * Finish the recovery of the file system.  This is separate from the
 * xfs_log_mount() call, because it depends on the code in xfs_mountfs() to read
 * in the root and real-time bitmap inodes between calling xfs_log_mount() and
 * here.
 *
 * If we finish recovery successfully, start the background log work. If we are
 * not doing recovery, then we have a RO filesystem and we don't need to start
 * it.
 */
int
xfs_log_mount_finish(
	struct xfs_mount	*mp)
{
	int	error = 0;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
		return 0;
	}

	error = xlog_recover_finish(mp->m_log);
	if (!error)
		xfs_log_work_queue(mp);

	return error;
}

/*
 * The mount has failed. Cancel the recovery if it hasn't completed and destroy
 * the log.
 */
int
xfs_log_mount_cancel(
	struct xfs_mount	*mp)
{
	int	error;

	error = xlog_recover_cancel(mp->m_log);
	xfs_log_unmount(mp);

	return error;
}

/*
 * Final log writes as part of unmount.
 *
 * Mark the filesystem clean as unmount happens.  Note that during relocation
 * this routine needs to be executed as part of source-bag while the
 * deallocation must not be done until source-end.
 */

/*
 * Unmount record used to have a string "Unmount filesystem--" in the
 * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
 * We just write the magic number now since that particular field isn't
 * currently architecture converted and "Unmount" is a bit foo.
 * As far as I know, there weren't any dependencies on the old behaviour.
 */

int
xfs_log_unmount_write(xfs_mount_t *mp)
{
	struct xlog	 *log = mp->m_log;
	xlog_in_core_t	 *iclog;
#ifdef DEBUG
	xlog_in_core_t	 *first_iclog;
#endif
	xlog_ticket_t	*tic = NULL;
	xfs_lsn_t	 lsn;
	int		 error;

	/*
	 * Don't write out unmount record on read-only mounts.
	 * Or, if we are doing a forced umount (typically because of IO errors).
	 */
	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;

	error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
	ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));

#ifdef DEBUG
	first_iclog = iclog = log->l_iclog;
	do {
		if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
			ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
			ASSERT(iclog->ic_offset == 0);
		}
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
#endif
	if (! (XLOG_FORCED_SHUTDOWN(log))) {
		error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
		if (!error) {
			/* the data section must be 32 bit size aligned */
			struct {
			    __uint16_t magic;
			    __uint16_t pad1;
			    __uint32_t pad2; /* may as well make it 64 bits */
			} magic = {
				.magic = XLOG_UNMOUNT_TYPE,
			};
			struct xfs_log_iovec reg = {
				.i_addr = &magic,
				.i_len = sizeof(magic),
				.i_type = XLOG_REG_TYPE_UNMOUNT,
			};
			struct xfs_log_vec vec = {
				.lv_niovecs = 1,
				.lv_iovecp = &reg,
			};

			/* remove inited flag, and account for space used */
			tic->t_flags = 0;
			tic->t_curr_res -= sizeof(magic);
			error = xlog_write(log, &vec, tic, &lsn,
					   NULL, XLOG_UNMOUNT_TRANS);
			/*
			 * At this point, we're umounting anyway,
			 * so there's no point in transitioning log state
			 * to IOERROR. Just continue...
			 */
		}

		if (error)
			xfs_alert(mp, "%s: unmount record failed", __func__);


		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);
		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
		      iclog->ic_state == XLOG_STATE_DIRTY)) {
			if (!XLOG_FORCED_SHUTDOWN(log)) {
				xlog_wait(&iclog->ic_force_wait,
					  &log->l_icloglock);
			} else {
				spin_unlock(&log->l_icloglock);
			}
		} else {
			spin_unlock(&log->l_icloglock);
		}
		if (tic) {
			trace_xfs_log_umount_write(log, tic);
			xlog_ungrant_log_space(log, tic);
			xfs_log_ticket_put(tic);
		}
	} else {
		/*
		 * We're already in forced_shutdown mode, couldn't
		 * even attempt to write out the unmount transaction.
		 *
		 * Go through the motions of sync'ing and releasing
		 * the iclog, even though no I/O will actually happen,
		 * we need to wait for other log I/Os that may already
		 * be in progress.  Do this as a separate section of
		 * code so we'll know if we ever get stuck here that
		 * we're in this odd situation of trying to unmount
		 * a file system that went into forced_shutdown as
		 * the result of an unmount..
		 */
		spin_lock(&log->l_icloglock);
		iclog = log->l_iclog;
		atomic_inc(&iclog->ic_refcnt);

		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);
		error = xlog_state_release_iclog(log, iclog);

		spin_lock(&log->l_icloglock);

		if ( ! (   iclog->ic_state == XLOG_STATE_ACTIVE
			|| iclog->ic_state == XLOG_STATE_DIRTY
			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {

				xlog_wait(&iclog->ic_force_wait,
					  &log->l_icloglock);
		} else {
			spin_unlock(&log->l_icloglock);
		}
	}

	return error;
}	/* xfs_log_unmount_write */

/*
 * Empty the log for unmount/freeze.
 *
 * To do this, we first need to shut down the background log work so it is not
 * trying to cover the log as we clean up. We then need to unpin all objects in
 * the log so we can then flush them out. Once they have completed their IO and
 * run the callbacks removing themselves from the AIL, we can write the unmount
 * record.
 */
void
xfs_log_quiesce(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_log->l_work);
	xfs_log_force(mp, XFS_LOG_SYNC);

	/*
	 * The superblock buffer is uncached and while xfs_ail_push_all_sync()
	 * will push it, xfs_wait_buftarg() will not wait for it. Further,
	 * xfs_buf_iowait() cannot be used because it was pushed with the
	 * XBF_ASYNC flag set, so we need to use a lock/unlock pair to wait for
	 * the IO to complete.
	 */
	xfs_ail_push_all_sync(mp->m_ail);
	xfs_wait_buftarg(mp->m_ddev_targp);
	xfs_buf_lock(mp->m_sb_bp);
	xfs_buf_unlock(mp->m_sb_bp);

	xfs_log_unmount_write(mp);
}

/*
 * Shut down and release the AIL and Log.
 *
 * During unmount, we need to ensure we flush all the dirty metadata objects
 * from the AIL so that the log is empty before we write the unmount record to
 * the log. Once this is done, we can tear down the AIL and the log.
 */
void
xfs_log_unmount(
	struct xfs_mount	*mp)
{
	xfs_log_quiesce(mp);

	xfs_trans_ail_destroy(mp);

	xfs_sysfs_del(&mp->m_log->l_kobj);

	xlog_dealloc_log(mp->m_log);
}

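/*
 * Initialise the fields of a log item that are common to all log item types.
 */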
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 973 | void |
| 974 | xfs_log_item_init( |
| 975 | struct xfs_mount *mp, |
| 976 | struct xfs_log_item *item, |
| 977 | int type, |
Christoph Hellwig | 272e42b | 2011-10-28 09:54:24 +0000 | [diff] [blame] | 978 | const struct xfs_item_ops *ops) |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 979 | { |
| 980 | item->li_mountp = mp; |
| 981 | item->li_ailp = mp->m_ail; |
| 982 | item->li_type = type; |
| 983 | item->li_ops = ops; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 984 | item->li_lv = NULL; |
| 985 | |
| 986 | INIT_LIST_HEAD(&item->li_ail); |
| 987 | INIT_LIST_HEAD(&item->li_cil); |
Dave Chinner | 43f5efc | 2010-03-23 10:10:00 +1100 | [diff] [blame] | 988 | } |
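/*
 * A hedged usage sketch, not taken from this file: a log item type embeds a
 * struct xfs_log_item and initialises it through xfs_log_item_init() with its
 * operations vector. The "foo" item, its ops table and XFS_LI_FOO below are
 * hypothetical placeholders, not real XFS definitions.
 */
struct xfs_foo_log_item {
	struct xfs_log_item	foo_item;	/* embedded generic log item */
	/* ... item-type-specific state ... */
};

static const struct xfs_item_ops xfs_foo_item_ops = {
	/* .iop_size, .iop_format, .iop_pin, ... supplied by the item type */
};

static void
xfs_foo_item_init(
	struct xfs_mount	*mp,
	struct xfs_foo_log_item	*foop)
{
	xfs_log_item_init(mp, &foop->foo_item, XFS_LI_FOO /* hypothetical */,
			  &xfs_foo_item_ops);
}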
| 989 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 990 | /* |
| 991 | * Wake up processes waiting for log space after we have moved the log tail. |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 992 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | void |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 994 | xfs_log_space_wake( |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 995 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 997 | struct xlog *log = mp->m_log; |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 998 | int free_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | if (XLOG_FORCED_SHUTDOWN(log)) |
| 1001 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1003 | if (!list_empty_careful(&log->l_write_head.waiters)) { |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1004 | ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); |
| 1005 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1006 | spin_lock(&log->l_write_head.lock); |
| 1007 | free_bytes = xlog_space_left(log, &log->l_write_head.grant); |
Christoph Hellwig | e179840d | 2012-02-20 02:31:29 +0000 | [diff] [blame] | 1008 | xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1009 | spin_unlock(&log->l_write_head.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1010 | } |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 1011 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1012 | if (!list_empty_careful(&log->l_reserve_head.waiters)) { |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1013 | ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); |
| 1014 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1015 | spin_lock(&log->l_reserve_head.lock); |
| 1016 | free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); |
Christoph Hellwig | e179840d | 2012-02-20 02:31:29 +0000 | [diff] [blame] | 1017 | xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1018 | spin_unlock(&log->l_reserve_head.lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | } |
Dave Chinner | 3f16b98 | 2010-12-21 12:29:01 +1100 | [diff] [blame] | 1020 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | |
| 1022 | /* |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1023 | * Determine if we have a transaction that has gone to disk that needs to be |
| 1024 | * covered. To begin the transition to the idle state, the log first needs to |
| 1025 | * be idle. That means the CIL, the AIL and the iclogs need to be empty before |
| 1026 | * we start attempting to cover the log. |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1027 | * |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1028 | * Only if we are then in a state where covering is needed is the caller |
| 1029 | * informed that dummy transactions are required to move the log into the idle |
| 1030 | * state. |
| 1031 | * |
| 1032 | * If there are any items in the AIL or CIL, then we do not want to attempt to |
| 1033 | * cover the log as we may be in a situation where there isn't log space |
| 1034 | * available to run a dummy transaction and this can lead to deadlocks when the |
| 1035 | * tail of the log is pinned by an item that is modified in the CIL. Hence |
| 1036 | * there's no point in running a dummy transaction at this point because we |
| 1037 | * can't start trying to idle the log until both the CIL and AIL are empty. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | */ |
| 1039 | int |
| 1040 | xfs_log_need_covered(xfs_mount_t *mp) |
| 1041 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1042 | struct xlog *log = mp->m_log; |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1043 | int needed = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | |
Brian Foster | 91ee575 | 2014-11-28 14:02:59 +1100 | [diff] [blame] | 1045 | if (!xfs_fs_writable(mp, SB_FREEZE_WRITE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | return 0; |
| 1047 | |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1048 | if (!xlog_cil_empty(log)) |
| 1049 | return 0; |
| 1050 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1051 | spin_lock(&log->l_icloglock); |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1052 | switch (log->l_covered_state) { |
| 1053 | case XLOG_STATE_COVER_DONE: |
| 1054 | case XLOG_STATE_COVER_DONE2: |
| 1055 | case XLOG_STATE_COVER_IDLE: |
| 1056 | break; |
| 1057 | case XLOG_STATE_COVER_NEED: |
| 1058 | case XLOG_STATE_COVER_NEED2: |
Dave Chinner | 2c6e24c | 2013-10-15 09:17:49 +1100 | [diff] [blame] | 1059 | if (xfs_ail_min_lsn(log->l_ailp)) |
| 1060 | break; |
| 1061 | if (!xlog_iclogs_empty(log)) |
| 1062 | break; |
| 1063 | |
| 1064 | needed = 1; |
| 1065 | if (log->l_covered_state == XLOG_STATE_COVER_NEED) |
| 1066 | log->l_covered_state = XLOG_STATE_COVER_DONE; |
| 1067 | else |
| 1068 | log->l_covered_state = XLOG_STATE_COVER_DONE2; |
| 1069 | break; |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1070 | default: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | needed = 1; |
Dave Chinner | b6f8dd4 | 2010-04-13 15:06:44 +1000 | [diff] [blame] | 1072 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | } |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1074 | spin_unlock(&log->l_icloglock); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1075 | return needed; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | } |
| 1077 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1078 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | * We may be holding the log iclog lock upon entering this routine. |
| 1080 | */ |
| 1081 | xfs_lsn_t |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1082 | xlog_assign_tail_lsn_locked( |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1083 | struct xfs_mount *mp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1084 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1085 | struct xlog *log = mp->m_log; |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1086 | struct xfs_log_item *lip; |
| 1087 | xfs_lsn_t tail_lsn; |
| 1088 | |
| 1089 | assert_spin_locked(&mp->m_ail->xa_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1091 | /* |
| 1092 | * To make sure we always have a valid LSN for the log tail we keep |
| 1093 | * track of the last LSN which was committed in log->l_last_sync_lsn, |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1094 | * and use that when the AIL is empty. |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 1095 | */ |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1096 | lip = xfs_ail_min(mp->m_ail); |
| 1097 | if (lip) |
| 1098 | tail_lsn = lip->li_lsn; |
| 1099 | else |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1100 | tail_lsn = atomic64_read(&log->l_last_sync_lsn); |
Dave Chinner | 750b9c9 | 2013-11-01 15:27:18 +1100 | [diff] [blame] | 1101 | trace_xfs_log_assign_tail_lsn(log, tail_lsn); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1102 | atomic64_set(&log->l_tail_lsn, tail_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | return tail_lsn; |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1104 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | |
Christoph Hellwig | 1c30462 | 2012-04-23 15:58:33 +1000 | [diff] [blame] | 1106 | xfs_lsn_t |
| 1107 | xlog_assign_tail_lsn( |
| 1108 | struct xfs_mount *mp) |
| 1109 | { |
| 1110 | xfs_lsn_t tail_lsn; |
| 1111 | |
| 1112 | spin_lock(&mp->m_ail->xa_lock); |
| 1113 | tail_lsn = xlog_assign_tail_lsn_locked(mp); |
| 1114 | spin_unlock(&mp->m_ail->xa_lock); |
| 1115 | |
| 1116 | return tail_lsn; |
| 1117 | } |
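/*
 * A hedged aside on the LSN values handled above: an xfs_lsn_t packs the log
 * cycle number into the high 32 bits and the basic block number into the low
 * 32 bits, which is why the tail and last-sync LSNs fit in a single
 * atomic64_t. Minimal standalone illustration of that packing (the demo_*
 * helpers are not real XFS functions):
 */
#include <stdint.h>

static inline uint64_t demo_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;		/* mirrors xlog_assign_lsn() */
}

static inline uint32_t demo_cycle_lsn(uint64_t lsn)
{
	return lsn >> 32;				/* mirrors CYCLE_LSN() */
}

static inline uint32_t demo_block_lsn(uint64_t lsn)
{
	return lsn & 0xffffffffu;			/* mirrors BLOCK_LSN() */
}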
| 1118 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | /* |
| 1120 | * Return the space in the log between the tail and the head. The head |
| 1121 | * is passed in the cycle/bytes formal parameters. In the special case where |
| 1122 | * the reserve head has wrapped past the tail, this calculation is no |
| 1123 | * longer valid. In this case, just return 0 which means there is no space |
| 1124 | * in the log. This works for all places where this function is called |
| 1125 | * with the reserve head. Of course, if the write head were to ever |
| 1126 | * wrap the tail, we should blow up. Rather than catch this case here, |
| 1127 | * we depend on other ASSERTions in other parts of the code. XXXmiken |
| 1128 | * |
| 1129 | * This code also handles the case where the reservation head is behind |
| 1130 | * the tail. The details of this case are described below, but the end |
| 1131 | * result is that we return the size of the log as the amount of space left. |
| 1132 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1133 | STATIC int |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1134 | xlog_space_left( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1135 | struct xlog *log, |
Dave Chinner | c8a09ff | 2010-12-04 00:02:40 +1100 | [diff] [blame] | 1136 | atomic64_t *head) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1137 | { |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1138 | int free_bytes; |
| 1139 | int tail_bytes; |
| 1140 | int tail_cycle; |
| 1141 | int head_cycle; |
| 1142 | int head_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1144 | xlog_crack_grant_head(head, &head_cycle, &head_bytes); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1145 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); |
| 1146 | tail_bytes = BBTOB(tail_bytes); |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1147 | if (tail_cycle == head_cycle && head_bytes >= tail_bytes) |
| 1148 | free_bytes = log->l_logsize - (head_bytes - tail_bytes); |
| 1149 | else if (tail_cycle + 1 < head_cycle) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | return 0; |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1151 | else if (tail_cycle < head_cycle) { |
| 1152 | ASSERT(tail_cycle == (head_cycle - 1)); |
| 1153 | free_bytes = tail_bytes - head_bytes; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | } else { |
| 1155 | /* |
| 1156 | * The reservation head is behind the tail. |
| 1157 | * In this case we just want to return the size of the |
| 1158 | * log as the amount of space left. |
| 1159 | */ |
Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 1160 | xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1161 | xfs_alert(log->l_mp, |
Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 1162 | " tail_cycle = %d, tail_bytes = %d", |
| 1163 | tail_cycle, tail_bytes); |
| 1164 | xfs_alert(log->l_mp, |
| 1165 | " GH cycle = %d, GH bytes = %d", |
| 1166 | head_cycle, head_bytes); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | ASSERT(0); |
| 1168 | free_bytes = log->l_logsize; |
| 1169 | } |
| 1170 | return free_bytes; |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 1171 | } |
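/*
 * A hedged worked example of the two common branches above (standalone
 * sketch, plain byte counts; demo_space_left() is not a real XFS function).
 * With a 10 MiB log: tail at cycle 7 offset 6 MiB and head at cycle 7 offset
 * 8 MiB gives logsize - (head - tail) = 8 MiB free; head wrapped into cycle 8
 * at offset 2 MiB gives tail - head = 4 MiB free.
 */
static long demo_space_left(long logsize, int tail_cycle, long tail_bytes,
			    int head_cycle, long head_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return logsize - (head_bytes - tail_bytes);
	if (tail_cycle + 1 < head_cycle)
		return 0;			/* reserve head overran the tail */
	if (tail_cycle < head_cycle)
		return tail_bytes - head_bytes;	/* head is one cycle ahead */
	return logsize;				/* head behind tail: shouldn't happen */
}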
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1172 | |
| 1173 | |
| 1174 | /* |
| 1175 | * Log function which is called when an io completes. |
| 1176 | * |
| 1177 | * The log manager needs its own routine, in order to control what |
| 1178 | * happens with the buffer after the write completes. |
| 1179 | */ |
| 1180 | void |
| 1181 | xlog_iodone(xfs_buf_t *bp) |
| 1182 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1183 | struct xlog_in_core *iclog = bp->b_fspriv; |
| 1184 | struct xlog *l = iclog->ic_log; |
| 1185 | int aborted = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 | |
| 1187 | /* |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 1188 | * Race to shut down the filesystem if we see an error or the iclog is in |
| 1189 | * IOABORT state. The IOABORT state is only set in DEBUG mode to inject |
| 1190 | * CRC errors into log recovery. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | */ |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 1192 | if (XFS_TEST_ERROR(bp->b_error, l->l_mp, XFS_ERRTAG_IODONE_IOERR, |
| 1193 | XFS_RANDOM_IODONE_IOERR) || |
| 1194 | iclog->ic_state & XLOG_STATE_IOABORT) { |
| 1195 | if (iclog->ic_state & XLOG_STATE_IOABORT) |
| 1196 | iclog->ic_state &= ~XLOG_STATE_IOABORT; |
| 1197 | |
Christoph Hellwig | 901796a | 2011-10-10 16:52:49 +0000 | [diff] [blame] | 1198 | xfs_buf_ioerror_alert(bp, __func__); |
Christoph Hellwig | c867cb6 | 2011-10-10 16:52:46 +0000 | [diff] [blame] | 1199 | xfs_buf_stale(bp); |
Nathan Scott | 7d04a33 | 2006-06-09 14:58:38 +1000 | [diff] [blame] | 1200 | xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | /* |
| 1202 | * This flag will be propagated to the trans-committed |
| 1203 | * callback routines to let them know that the log-commit |
| 1204 | * didn't succeed. |
| 1205 | */ |
| 1206 | aborted = XFS_LI_ABORTED; |
| 1207 | } else if (iclog->ic_state & XLOG_STATE_IOERROR) { |
| 1208 | aborted = XFS_LI_ABORTED; |
| 1209 | } |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1210 | |
| 1211 | /* log I/O is always issued ASYNC */ |
Dave Chinner | 1157b32c | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 1212 | ASSERT(bp->b_flags & XBF_ASYNC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | xlog_state_done_syncing(iclog, aborted); |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1214 | |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1215 | /* |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1216 | * drop the buffer lock now that we are done. Nothing references |
| 1217 | * the buffer after this, so an unmount waiting on this lock can now |
| 1218 | * tear it down safely. As such, it is unsafe to reference the buffer |
| 1219 | * (bp) after the unlock as we could race with it being freed. |
David Chinner | 3db296f | 2007-05-14 18:24:16 +1000 | [diff] [blame] | 1220 | */ |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1221 | xfs_buf_unlock(bp); |
Dave Chinner | c3f8fc7 | 2012-11-12 22:54:01 +1100 | [diff] [blame] | 1222 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1223 | |
| 1224 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | * Return size of each in-core log record buffer. |
| 1226 | * |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1227 | * All machines get 8 x 32kB buffers by default, unless tuned otherwise. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | * |
| 1229 | * If the filesystem blocksize is too large, we may need to choose a |
| 1230 | * larger size since the directory code currently logs entire blocks. |
| 1231 | */ |
| 1232 | |
| 1233 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1234 | xlog_get_iclog_buffer_size( |
| 1235 | struct xfs_mount *mp, |
| 1236 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1237 | { |
| 1238 | int size; |
| 1239 | int xhdrs; |
| 1240 | |
Eric Sandeen | 1cb5125 | 2007-08-16 16:24:43 +1000 | [diff] [blame] | 1241 | if (mp->m_logbufs <= 0) |
| 1242 | log->l_iclog_bufs = XLOG_MAX_ICLOGS; |
| 1243 | else |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 1244 | log->l_iclog_bufs = mp->m_logbufs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | |
| 1246 | /* |
| 1247 | * Buffer size passed in from mount system call. |
| 1248 | */ |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 1249 | if (mp->m_logbsize > 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1250 | size = log->l_iclog_size = mp->m_logbsize; |
| 1251 | log->l_iclog_size_log = 0; |
| 1252 | while (size != 1) { |
| 1253 | log->l_iclog_size_log++; |
| 1254 | size >>= 1; |
| 1255 | } |
| 1256 | |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1257 | if (xfs_sb_version_haslogv2(&mp->m_sb)) { |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1258 | /* # headers = size / 32k |
| 1259 | * one header holds cycles from 32k of data |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | */ |
| 1261 | |
| 1262 | xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE; |
| 1263 | if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE) |
| 1264 | xhdrs++; |
| 1265 | log->l_iclog_hsize = xhdrs << BBSHIFT; |
| 1266 | log->l_iclog_heads = xhdrs; |
| 1267 | } else { |
| 1268 | ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE); |
| 1269 | log->l_iclog_hsize = BBSIZE; |
| 1270 | log->l_iclog_heads = 1; |
| 1271 | } |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 1272 | goto done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1273 | } |
| 1274 | |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 1275 | /* All machines use 32kB buffers by default. */ |
Eric Sandeen | 1cb5125 | 2007-08-16 16:24:43 +1000 | [diff] [blame] | 1276 | log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; |
| 1277 | log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | |
| 1279 | /* the default log size is 16k or 32k which is one header sector */ |
| 1280 | log->l_iclog_hsize = BBSIZE; |
| 1281 | log->l_iclog_heads = 1; |
| 1282 | |
Christoph Hellwig | 7153f8b | 2009-02-09 08:36:46 +0100 | [diff] [blame] | 1283 | done: |
| 1284 | /* are we being asked to make the sizes selected above visible? */ |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 1285 | if (mp->m_logbufs == 0) |
| 1286 | mp->m_logbufs = log->l_iclog_bufs; |
| 1287 | if (mp->m_logbsize == 0) |
| 1288 | mp->m_logbsize = log->l_iclog_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | } /* xlog_get_iclog_buffer_size */ |
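/*
 * A hedged worked example of the v2 header sizing above (standalone sketch;
 * 32768 and 512 stand in for XLOG_HEADER_CYCLE_SIZE and BBSIZE). A 256 KiB
 * mount-specified log buffer spans 256k / 32k = 8 cycle-data windows, so it
 * needs 8 header sectors and l_iclog_hsize becomes 8 * 512 = 4096 bytes.
 */
static int demo_iclog_header_bytes(int logbsize)
{
	int xhdrs = logbsize / 32768;

	if (logbsize % 32768)
		xhdrs++;			/* round up to cover a partial window */
	return xhdrs * 512;			/* demo_iclog_header_bytes(262144) == 4096 */
}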
| 1290 | |
| 1291 | |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1292 | void |
| 1293 | xfs_log_work_queue( |
| 1294 | struct xfs_mount *mp) |
| 1295 | { |
Dave Chinner | 5889608 | 2012-10-08 21:56:05 +1100 | [diff] [blame] | 1296 | queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work, |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1297 | msecs_to_jiffies(xfs_syncd_centisecs * 10)); |
| 1298 | } |
| 1299 | |
| 1300 | /* |
| 1301 | * Every sync period we need to unpin all items in the AIL and push them to |
| 1302 | * disk. If there is nothing dirty, then we might need to cover the log to |
| 1303 | * indicate that the filesystem is idle. |
| 1304 | */ |
| 1305 | void |
| 1306 | xfs_log_worker( |
| 1307 | struct work_struct *work) |
| 1308 | { |
| 1309 | struct xlog *log = container_of(to_delayed_work(work), |
| 1310 | struct xlog, l_work); |
| 1311 | struct xfs_mount *mp = log->l_mp; |
| 1312 | |
| 1313 | /* dgc: errors ignored - not fatal and nowhere to report them */ |
Dave Chinner | 61e63ec | 2015-01-22 09:10:31 +1100 | [diff] [blame] | 1314 | if (xfs_log_need_covered(mp)) { |
| 1315 | /* |
| 1316 | * Dump a transaction into the log that contains no real change. |
| 1317 | * This is needed to stamp the current tail LSN into the log |
| 1318 | * during the covering operation. |
| 1319 | * |
| 1320 | * We cannot use an inode here for this - that will push dirty |
| 1321 | * state back up into the VFS and then periodic inode flushing |
| 1322 | * will prevent log covering from making progress. Hence we |
| 1323 | * synchronously log the superblock instead to ensure the |
| 1324 | * superblock is immediately unpinned and can be written back. |
| 1325 | */ |
| 1326 | xfs_sync_sb(mp, true); |
| 1327 | } else |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1328 | xfs_log_force(mp, 0); |
| 1329 | |
| 1330 | /* start pushing all the metadata that is currently dirty */ |
| 1331 | xfs_ail_push_all(mp->m_ail); |
| 1332 | |
| 1333 | /* queue us up again */ |
| 1334 | xfs_log_work_queue(mp); |
| 1335 | } |
| 1336 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | /* |
| 1338 | * This routine initializes some of the log structure for a given mount point. |
| 1339 | * Its primary purpose is to fill in enough so that recovery can occur. However, |
| 1340 | * some other stuff may be filled in too. |
| 1341 | */ |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1342 | STATIC struct xlog * |
| 1343 | xlog_alloc_log( |
| 1344 | struct xfs_mount *mp, |
| 1345 | struct xfs_buftarg *log_target, |
| 1346 | xfs_daddr_t blk_offset, |
| 1347 | int num_bblks) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1349 | struct xlog *log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | xlog_rec_header_t *head; |
| 1351 | xlog_in_core_t **iclogp; |
| 1352 | xlog_in_core_t *iclog, *prev_iclog=NULL; |
| 1353 | xfs_buf_t *bp; |
| 1354 | int i; |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1355 | int error = -ENOMEM; |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1356 | uint log2_size = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1358 | log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1359 | if (!log) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1360 | xfs_warn(mp, "Log allocation failed: No memory!"); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1361 | goto out; |
| 1362 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | |
| 1364 | log->l_mp = mp; |
| 1365 | log->l_targ = log_target; |
| 1366 | log->l_logsize = BBTOB(num_bblks); |
| 1367 | log->l_logBBstart = blk_offset; |
| 1368 | log->l_logBBsize = num_bblks; |
| 1369 | log->l_covered_state = XLOG_STATE_COVER_IDLE; |
| 1370 | log->l_flags |= XLOG_ACTIVE_RECOVERY; |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 1371 | INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | |
| 1373 | log->l_prev_block = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1375 | xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); |
| 1376 | xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ |
Christoph Hellwig | c303c5b | 2012-02-20 02:31:26 +0000 | [diff] [blame] | 1378 | |
| 1379 | xlog_grant_head_init(&log->l_reserve_head); |
| 1380 | xlog_grant_head_init(&log->l_write_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1382 | error = -EFSCORRUPTED; |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1383 | if (xfs_sb_version_hassector(&mp->m_sb)) { |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1384 | log2_size = mp->m_sb.sb_logsectlog; |
| 1385 | if (log2_size < BBSHIFT) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1386 | xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", |
| 1387 | log2_size, BBSHIFT); |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1388 | goto out_free_log; |
| 1389 | } |
| 1390 | |
| 1391 | log2_size -= BBSHIFT; |
| 1392 | if (log2_size > mp->m_sectbb_log) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1393 | xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", |
| 1394 | log2_size, mp->m_sectbb_log); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1395 | goto out_free_log; |
| 1396 | } |
| 1397 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | /* for larger sector sizes, must have v2 or external log */ |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1399 | if (log2_size && log->l_logBBstart > 0 && |
| 1400 | !xfs_sb_version_haslogv2(&mp->m_sb)) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 1401 | xfs_warn(mp, |
| 1402 | "log sector size (0x%x) invalid for configuration.", |
| 1403 | log2_size); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1404 | goto out_free_log; |
| 1405 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | } |
Alex Elder | 69ce58f | 2010-04-20 17:09:59 +1000 | [diff] [blame] | 1407 | log->l_sectBBsize = 1 << log2_size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | |
| 1409 | xlog_get_iclog_buffer_size(mp, log); |
| 1410 | |
Dave Chinner | 400b9d8 | 2014-08-04 12:42:40 +1000 | [diff] [blame] | 1411 | /* |
| 1412 | * Use a NULL block for the extra log buffer used during splits so that |
| 1413 | * it will trigger errors if we ever try to do IO on it without first |
| 1414 | * having set it up properly. |
| 1415 | */ |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1416 | error = -ENOMEM; |
Dave Chinner | 400b9d8 | 2014-08-04 12:42:40 +1000 | [diff] [blame] | 1417 | bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL, |
| 1418 | BTOBB(log->l_iclog_size), 0); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1419 | if (!bp) |
| 1420 | goto out_free_log; |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1421 | |
| 1422 | /* |
| 1423 | * The iclogbuf buffer locks are held over IO but we are not going to do |
| 1424 | * IO yet. Hence unlock the buffer so that the log IO path can grab it |
| 1425 | * when appropriate. |
| 1426 | */ |
Christoph Hellwig | 0c842ad | 2011-07-08 14:36:19 +0200 | [diff] [blame] | 1427 | ASSERT(xfs_buf_islocked(bp)); |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1428 | xfs_buf_unlock(bp); |
| 1429 | |
Brian Foster | 96ab795 | 2014-12-24 09:46:23 +1100 | [diff] [blame] | 1430 | /* use high priority wq for log I/O completion */ |
| 1431 | bp->b_ioend_wq = mp->m_log_workqueue; |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1432 | bp->b_iodone = xlog_iodone; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1433 | log->l_xbuf = bp; |
| 1434 | |
Eric Sandeen | 007c61c | 2007-10-11 17:43:56 +1000 | [diff] [blame] | 1435 | spin_lock_init(&log->l_icloglock); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 1436 | init_waitqueue_head(&log->l_flush_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1437 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | iclogp = &log->l_iclog; |
| 1439 | /* |
| 1440 | * The amount of memory to allocate for the iclog structure is |
| 1441 | * rather funky due to the way the structure is defined. It is |
| 1442 | * done this way so that we can use different sizes for machines |
| 1443 | * with different amounts of memory. See the definition of |
| 1444 | * xlog_in_core_t in xfs_log_priv.h for details. |
| 1445 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | ASSERT(log->l_iclog_size >= 4096); |
| 1447 | for (i=0; i < log->l_iclog_bufs; i++) { |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1448 | *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); |
| 1449 | if (!*iclogp) |
| 1450 | goto out_free_iclog; |
| 1451 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1452 | iclog = *iclogp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1453 | iclog->ic_prev = prev_iclog; |
| 1454 | prev_iclog = iclog; |
Christoph Hellwig | 1fa40b0 | 2007-05-14 18:23:50 +1000 | [diff] [blame] | 1455 | |
Dave Chinner | 686865f | 2010-09-24 20:07:47 +1000 | [diff] [blame] | 1456 | bp = xfs_buf_get_uncached(mp->m_logdev_targp, |
Dave Chinner | e70b73f | 2012-04-23 15:58:49 +1000 | [diff] [blame] | 1457 | BTOBB(log->l_iclog_size), 0); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1458 | if (!bp) |
| 1459 | goto out_free_iclog; |
Christoph Hellwig | c8da0fa | 2011-07-08 14:36:25 +0200 | [diff] [blame] | 1460 | |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1461 | ASSERT(xfs_buf_islocked(bp)); |
| 1462 | xfs_buf_unlock(bp); |
| 1463 | |
Brian Foster | 96ab795 | 2014-12-24 09:46:23 +1100 | [diff] [blame] | 1464 | /* use high priority wq for log I/O completion */ |
| 1465 | bp->b_ioend_wq = mp->m_log_workqueue; |
Christoph Hellwig | cb669ca | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 1466 | bp->b_iodone = xlog_iodone; |
Christoph Hellwig | 1fa40b0 | 2007-05-14 18:23:50 +1000 | [diff] [blame] | 1467 | iclog->ic_bp = bp; |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 1468 | iclog->ic_data = bp->b_addr; |
David Chinner | 4679b2d | 2008-04-10 12:18:54 +1000 | [diff] [blame] | 1469 | #ifdef DEBUG |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 1470 | log->l_iclog_bak[i] = &iclog->ic_header; |
David Chinner | 4679b2d | 2008-04-10 12:18:54 +1000 | [diff] [blame] | 1471 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1472 | head = &iclog->ic_header; |
| 1473 | memset(head, 0, sizeof(xlog_rec_header_t)); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1474 | head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); |
| 1475 | head->h_version = cpu_to_be32( |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1476 | xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1477 | head->h_size = cpu_to_be32(log->l_iclog_size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1478 | /* new fields */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1479 | head->h_fmt = cpu_to_be32(XLOG_FMT); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); |
| 1481 | |
Dave Chinner | 4e94b71 | 2012-04-23 15:58:51 +1000 | [diff] [blame] | 1482 | iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1483 | iclog->ic_state = XLOG_STATE_ACTIVE; |
| 1484 | iclog->ic_log = log; |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 1485 | atomic_set(&iclog->ic_refcnt, 0); |
| 1486 | spin_lock_init(&iclog->ic_callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | iclog->ic_callback_tail = &(iclog->ic_callback); |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 1488 | iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 1490 | init_waitqueue_head(&iclog->ic_force_wait); |
| 1491 | init_waitqueue_head(&iclog->ic_write_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1492 | |
| 1493 | iclogp = &iclog->ic_next; |
| 1494 | } |
| 1495 | *iclogp = log->l_iclog; /* complete ring */ |
| 1496 | log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ |
| 1497 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1498 | error = xlog_cil_init(log); |
| 1499 | if (error) |
| 1500 | goto out_free_iclog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | return log; |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1502 | |
| 1503 | out_free_iclog: |
| 1504 | for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { |
| 1505 | prev_iclog = iclog->ic_next; |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 1506 | if (iclog->ic_bp) |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1507 | xfs_buf_free(iclog->ic_bp); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1508 | kmem_free(iclog); |
| 1509 | } |
| 1510 | spinlock_destroy(&log->l_icloglock); |
Dave Chinner | 644c356 | 2008-11-10 16:50:24 +1100 | [diff] [blame] | 1511 | xfs_buf_free(log->l_xbuf); |
| 1512 | out_free_log: |
| 1513 | kmem_free(log); |
Dave Chinner | a6cb767 | 2009-04-06 18:39:27 +0200 | [diff] [blame] | 1514 | out: |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1515 | return ERR_PTR(error); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1516 | } /* xlog_alloc_log */ |
| 1517 | |
| 1518 | |
| 1519 | /* |
| 1520 | * Write out the commit record of a transaction associated with the given |
| 1521 | * ticket. Return the lsn of the commit record. |
| 1522 | */ |
| 1523 | STATIC int |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 1524 | xlog_commit_record( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1525 | struct xlog *log, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 1526 | struct xlog_ticket *ticket, |
| 1527 | struct xlog_in_core **iclog, |
| 1528 | xfs_lsn_t *commitlsnp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | { |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 1530 | struct xfs_mount *mp = log->l_mp; |
| 1531 | int error; |
| 1532 | struct xfs_log_iovec reg = { |
| 1533 | .i_addr = NULL, |
| 1534 | .i_len = 0, |
| 1535 | .i_type = XLOG_REG_TYPE_COMMIT, |
| 1536 | }; |
| 1537 | struct xfs_log_vec vec = { |
| 1538 | .lv_niovecs = 1, |
| 1539 | .lv_iovecp = ®, |
| 1540 | }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1541 | |
| 1542 | ASSERT_ALWAYS(iclog); |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 1543 | error = xlog_write(log, &vec, ticket, commitlsnp, iclog, |
| 1544 | XLOG_COMMIT_TRANS); |
| 1545 | if (error) |
Nathan Scott | 7d04a33 | 2006-06-09 14:58:38 +1000 | [diff] [blame] | 1546 | xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1547 | return error; |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 1548 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | |
| 1550 | /* |
| 1551 | * Push on the buffer cache code if we ever use more than 75% of the on-disk |
| 1552 | * log space. This code pushes on the lsn which would supposedly free up |
| 1553 | * the 25% which we want to leave free. We may need to adopt a policy which |
| 1554 | * pushes on an lsn which is further along in the log once we reach the high |
| 1555 | * water mark. In this manner, we would be creating a low water mark. |
| 1556 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1557 | STATIC void |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1558 | xlog_grant_push_ail( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 1559 | struct xlog *log, |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1560 | int need_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1561 | { |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1562 | xfs_lsn_t threshold_lsn = 0; |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1563 | xfs_lsn_t last_sync_lsn; |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1564 | int free_blocks; |
| 1565 | int free_bytes; |
| 1566 | int threshold_block; |
| 1567 | int threshold_cycle; |
| 1568 | int free_threshold; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1569 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1570 | ASSERT(BTOBB(need_bytes) < log->l_logBBsize); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1571 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1572 | free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1573 | free_blocks = BTOBBT(free_bytes); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1575 | /* |
| 1576 | * Set the threshold for the minimum number of free blocks in the |
| 1577 | * log to the maximum of what the caller needs, one quarter of the |
| 1578 | * log, and 256 blocks. |
| 1579 | */ |
| 1580 | free_threshold = BTOBB(need_bytes); |
| 1581 | free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); |
| 1582 | free_threshold = MAX(free_threshold, 256); |
| 1583 | if (free_blocks >= free_threshold) |
| 1584 | return; |
| 1585 | |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 1586 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, |
| 1587 | &threshold_block); |
| 1588 | threshold_block += free_threshold; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1589 | if (threshold_block >= log->l_logBBsize) { |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1590 | threshold_block -= log->l_logBBsize; |
| 1591 | threshold_cycle += 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | } |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1593 | threshold_lsn = xlog_assign_lsn(threshold_cycle, |
| 1594 | threshold_block); |
| 1595 | /* |
| 1596 | * Don't pass in an lsn greater than the lsn of the last |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1597 | * log record known to be on disk. Use a snapshot of the last sync lsn |
| 1598 | * so that it doesn't change between the compare and the set. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | */ |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 1600 | last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); |
| 1601 | if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) |
| 1602 | threshold_lsn = last_sync_lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1604 | /* |
| 1605 | * Get the transaction layer to kick the dirty buffers out to |
| 1606 | * disk asynchronously. No point in trying to do this if |
| 1607 | * the filesystem is shutting down. |
| 1608 | */ |
| 1609 | if (!XLOG_FORCED_SHUTDOWN(log)) |
Dave Chinner | fd07484 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 1610 | xfs_ail_push(log->l_ailp, threshold_lsn); |
Dave Chinner | 2ced19c | 2010-12-21 12:09:20 +1100 | [diff] [blame] | 1611 | } |
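/*
 * A hedged worked example of the push target above (standalone sketch;
 * demo_push_target() is not a real XFS function, counts are in 512-byte
 * basic blocks). For a 2000-block log with the tail at cycle 5, block 1800,
 * the threshold is max(needed, 2000/4, 256) = 500 blocks, so the AIL push
 * target wraps to cycle 6, block 300.
 */
static void demo_push_target(int log_blocks, int need_blocks,
			     int tail_cycle, int tail_block,
			     int *target_cycle, int *target_block)
{
	int threshold = need_blocks;

	if (threshold < log_blocks >> 2)
		threshold = log_blocks >> 2;	/* at least a quarter of the log */
	if (threshold < 256)
		threshold = 256;		/* ...and never fewer than 256 blocks */

	*target_cycle = tail_cycle;
	*target_block = tail_block + threshold;
	if (*target_block >= log_blocks) {	/* wrap into the next cycle */
		*target_block -= log_blocks;
		*target_cycle += 1;
	}
}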
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1613 | /* |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1614 | * Stamp cycle number in every block |
| 1615 | */ |
| 1616 | STATIC void |
| 1617 | xlog_pack_data( |
| 1618 | struct xlog *log, |
| 1619 | struct xlog_in_core *iclog, |
| 1620 | int roundoff) |
| 1621 | { |
| 1622 | int i, j, k; |
| 1623 | int size = iclog->ic_offset + roundoff; |
| 1624 | __be32 cycle_lsn; |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 1625 | char *dp; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1626 | |
| 1627 | cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); |
| 1628 | |
| 1629 | dp = iclog->ic_datap; |
| 1630 | for (i = 0; i < BTOBB(size); i++) { |
| 1631 | if (i >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) |
| 1632 | break; |
| 1633 | iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; |
| 1634 | *(__be32 *)dp = cycle_lsn; |
| 1635 | dp += BBSIZE; |
| 1636 | } |
| 1637 | |
| 1638 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { |
| 1639 | xlog_in_core_2_t *xhdr = iclog->ic_data; |
| 1640 | |
| 1641 | for ( ; i < BTOBB(size); i++) { |
| 1642 | j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 1643 | k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 1644 | xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; |
| 1645 | *(__be32 *)dp = cycle_lsn; |
| 1646 | dp += BBSIZE; |
| 1647 | } |
| 1648 | |
| 1649 | for (i = 1; i < log->l_iclog_heads; i++) |
| 1650 | xhdr[i].hic_xheader.xh_cycle = cycle_lsn; |
| 1651 | } |
| 1652 | } |
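/*
 * A hedged illustration of the cycle stamping above for a single 512-byte
 * block (standalone sketch; the demo_* helpers are not real XFS functions).
 * Packing saves the block's first word in the record header and overwrites it
 * with the cycle number; recovery checks the stamp and then restores the
 * saved word, which is how torn log writes are detected.
 */
#include <stdint.h>

static void demo_pack_block(uint32_t *block, uint32_t *saved, uint32_t cycle_lsn)
{
	*saved = block[0];		/* stash the real first word in the header */
	block[0] = cycle_lsn;		/* stamp the record's cycle number */
}

static int demo_unpack_block(uint32_t *block, uint32_t saved, uint32_t cycle_lsn)
{
	if (block[0] != cycle_lsn)
		return -1;		/* stale cycle: this block never hit disk */
	block[0] = saved;		/* restore the original data word */
	return 0;
}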
| 1653 | |
| 1654 | /* |
| 1655 | * Calculate the checksum for a log buffer. |
| 1656 | * |
| 1657 | * This is a little more complicated than it should be because the various |
| 1658 | * headers and the actual data are non-contiguous. |
| 1659 | */ |
Dave Chinner | f9668a0 | 2012-11-28 13:01:03 +1100 | [diff] [blame] | 1660 | __le32 |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1661 | xlog_cksum( |
| 1662 | struct xlog *log, |
| 1663 | struct xlog_rec_header *rhead, |
| 1664 | char *dp, |
| 1665 | int size) |
| 1666 | { |
| 1667 | __uint32_t crc; |
| 1668 | |
| 1669 | /* first generate the crc for the record header ... */ |
| 1670 | crc = xfs_start_cksum((char *)rhead, |
| 1671 | sizeof(struct xlog_rec_header), |
| 1672 | offsetof(struct xlog_rec_header, h_crc)); |
| 1673 | |
| 1674 | /* ... then for additional cycle data for v2 logs ... */ |
| 1675 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { |
| 1676 | union xlog_in_core2 *xhdr = (union xlog_in_core2 *)rhead; |
| 1677 | int i; |
Brian Foster | a3f2001 | 2015-08-19 09:59:50 +1000 | [diff] [blame] | 1678 | int xheads; |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1679 | |
Brian Foster | a3f2001 | 2015-08-19 09:59:50 +1000 | [diff] [blame] | 1680 | xheads = size / XLOG_HEADER_CYCLE_SIZE; |
| 1681 | if (size % XLOG_HEADER_CYCLE_SIZE) |
| 1682 | xheads++; |
| 1683 | |
| 1684 | for (i = 1; i < xheads; i++) { |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1685 | crc = crc32c(crc, &xhdr[i].hic_xheader, |
| 1686 | sizeof(struct xlog_rec_ext_header)); |
| 1687 | } |
| 1688 | } |
| 1689 | |
| 1690 | /* ... and finally for the payload */ |
| 1691 | crc = crc32c(crc, dp, size); |
| 1692 | |
| 1693 | return xfs_end_cksum(crc); |
| 1694 | } |
| 1695 | |
| 1696 | /* |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1697 | * The bdstrat callback function for log bufs. This gives us a central |
| 1698 | * place to trap bufs in case we get hit by a log I/O error and need to |
| 1699 | * shut down. Actually, in practice, even when we didn't get a log error, |
| 1700 | * we transition the iclogs to IOERROR state *after* flushing all existing |
| 1701 | * iclogs to disk. This is because we don't want any new transactions to be |
| 1702 | * started or completed afterwards. |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1703 | * |
| 1704 | * We lock the iclogbufs here so that we can serialise against IO completion |
| 1705 | * during unmount. We might be processing a shutdown triggered during unmount, |
| 1706 | * and that can occur asynchronously to the unmount thread, and hence we need to |
| 1707 | * ensure that completes before tearing down the iclogbufs. Hence we need to |
| 1708 | * hold the buffer lock across the log IO to acheive that. |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1709 | */ |
| 1710 | STATIC int |
| 1711 | xlog_bdstrat( |
| 1712 | struct xfs_buf *bp) |
| 1713 | { |
Christoph Hellwig | adadbee | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 1714 | struct xlog_in_core *iclog = bp->b_fspriv; |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1715 | |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1716 | xfs_buf_lock(bp); |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1717 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 1718 | xfs_buf_ioerror(bp, -EIO); |
Christoph Hellwig | c867cb6 | 2011-10-10 16:52:46 +0000 | [diff] [blame] | 1719 | xfs_buf_stale(bp); |
Dave Chinner | e8aaba9 | 2014-10-02 09:04:22 +1000 | [diff] [blame] | 1720 | xfs_buf_ioend(bp); |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1721 | /* |
| 1722 | * It would seem logical to return EIO here, but we rely on |
| 1723 | * the log state machine to propagate I/O errors instead of |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1724 | * doing it here. Similarly, IO completion will unlock the |
| 1725 | * buffer, so we don't do it here. |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1726 | */ |
| 1727 | return 0; |
| 1728 | } |
| 1729 | |
Dave Chinner | 595bff7 | 2014-10-02 09:05:14 +1000 | [diff] [blame] | 1730 | xfs_buf_submit(bp); |
Christoph Hellwig | 873ff55 | 2010-01-13 22:17:57 +0000 | [diff] [blame] | 1731 | return 0; |
| 1732 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | |
| 1734 | /* |
| 1735 | * Flush out the in-core log (iclog) to the on-disk log in an asynchronous |
| 1736 | * fashion. By the time we are called, the current iclog |
| 1737 | * ptr in the log should already point to the next available iclog. This allows further |
| 1738 | * writes to continue while this code syncs out an iclog ready to go. |
| 1739 | * Before an in-core log can be written out, the data section must be scanned |
| 1740 | * to save away the 1st word of each BBSIZE block into the header. We replace |
| 1741 | * it with the current cycle count. Each BBSIZE block is tagged with the |
| 1742 | * cycle count because there is an implicit assumption that drives will |
| 1743 | * guarantee that entire 512 byte blocks get written at once. In other words, |
| 1744 | * we can't have part of a 512 byte block written and part not written. By |
| 1745 | * tagging each block, we will know which blocks are valid when recovering |
| 1746 | * after an unclean shutdown. |
| 1747 | * |
| 1748 | * This routine is single threaded on the iclog. No other thread can be in |
| 1749 | * this routine with the same iclog. Changing contents of iclog can there- |
| 1750 | * fore be done without grabbing the state machine lock. Updating the global |
| 1751 | * log will require grabbing the lock though. |
| 1752 | * |
| 1753 | * The entire log manager uses a logical block numbering scheme. Only |
| 1754 | * log_sync (and then only bwrite()) knows about the fact that the log may |
| 1755 | * not start with block zero on a given device. The log block start offset |
| 1756 | * is added immediately before calling bwrite(). |
| 1757 | */ |
| 1758 | |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1759 | STATIC int |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1760 | xlog_sync( |
| 1761 | struct xlog *log, |
| 1762 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | xfs_buf_t *bp; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1765 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1766 | uint count; /* byte count of bwrite */ |
| 1767 | uint count_init; /* initial count before roundup */ |
| 1768 | int roundoff; /* roundoff to BB or stripe */ |
| 1769 | int split = 0; /* split write into two regions */ |
| 1770 | int error; |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 1771 | int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1772 | int size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1773 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 1774 | XFS_STATS_INC(log->l_mp, xs_log_writes); |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 1775 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | |
| 1777 | /* Add for LR header */ |
| 1778 | count_init = log->l_iclog_hsize + iclog->ic_offset; |
| 1779 | |
| 1780 | /* Round out the log write size */ |
| 1781 | if (v2 && log->l_mp->m_sb.sb_logsunit > 1) { |
| 1782 | /* we have a v2 stripe unit to use */ |
| 1783 | count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); |
| 1784 | } else { |
| 1785 | count = BBTOB(BTOBB(count_init)); |
| 1786 | } |
| 1787 | roundoff = count - count_init; |
| 1788 | ASSERT(roundoff >= 0); |
| 1789 | ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && |
| 1790 | roundoff < log->l_mp->m_sb.sb_logsunit) |
| 1791 | || |
| 1792 | (log->l_mp->m_sb.sb_logsunit <= 1 && |
| 1793 | roundoff < BBTOB(1))); |
| 1794 | |
| 1795 | /* move grant heads by roundoff in sync */ |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 1796 | xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); |
| 1797 | xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | |
| 1799 | /* put cycle number in every block */ |
| 1800 | xlog_pack_data(log, iclog, roundoff); |
| 1801 | |
| 1802 | /* real byte length */ |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1803 | size = iclog->ic_offset; |
| 1804 | if (v2) |
| 1805 | size += roundoff; |
| 1806 | iclog->ic_header.h_len = cpu_to_be32(size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 | |
Nathan Scott | f5faad7 | 2006-07-28 17:04:44 +1000 | [diff] [blame] | 1808 | bp = iclog->ic_bp; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 1809 | XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1810 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 1811 | XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | |
| 1813 | /* Do we need to split this write into 2 parts? */ |
| 1814 | if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1815 | char *dptr; |
| 1816 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 | split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); |
| 1818 | count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1819 | iclog->ic_bwritecnt = 2; |
| 1820 | |
| 1821 | /* |
| 1822 | * Bump the cycle numbers at the start of each block in the |
| 1823 | * part of the iclog that ends up in the buffer that gets |
| 1824 | * written to the start of the log. |
| 1825 | * |
| 1826 | * Watch out for the header magic number case, though. |
| 1827 | */ |
| 1828 | dptr = (char *)&iclog->ic_header + count; |
| 1829 | for (i = 0; i < split; i += BBSIZE) { |
| 1830 | __uint32_t cycle = be32_to_cpu(*(__be32 *)dptr); |
| 1831 | if (++cycle == XLOG_HEADER_MAGIC_NUM) |
| 1832 | cycle++; |
| 1833 | *(__be32 *)dptr = cpu_to_be32(cycle); |
| 1834 | |
| 1835 | dptr += BBSIZE; |
| 1836 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1837 | } else { |
| 1838 | iclog->ic_bwritecnt = 1; |
| 1839 | } |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1840 | |
| 1841 | /* calculcate the checksum */ |
| 1842 | iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, |
| 1843 | iclog->ic_datap, size); |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 1844 | #ifdef DEBUG |
| 1845 | /* |
| 1846 | * Intentionally corrupt the log record CRC based on the error injection |
| 1847 | * frequency, if defined. This facilitates testing log recovery in the |
| 1848 | * event of torn writes. Hence, set the IOABORT state to abort the log |
| 1849 | * write on I/O completion and shutdown the fs. The subsequent mount |
| 1850 | * detects the bad CRC and attempts to recover. |
| 1851 | */ |
| 1852 | if (log->l_badcrc_factor && |
| 1853 | (prandom_u32() % log->l_badcrc_factor == 0)) { |
| 1854 | iclog->ic_header.h_crc &= 0xAAAAAAAA; |
| 1855 | iclog->ic_state |= XLOG_STATE_IOABORT; |
| 1856 | xfs_warn(log->l_mp, |
| 1857 | "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", |
| 1858 | be64_to_cpu(iclog->ic_header.h_lsn)); |
| 1859 | } |
| 1860 | #endif |
Christoph Hellwig | 0e446be | 2012-11-12 22:54:24 +1100 | [diff] [blame] | 1861 | |
Dave Chinner | aa0e883 | 2012-04-23 15:58:52 +1000 | [diff] [blame] | 1862 | bp->b_io_length = BTOBB(count); |
Christoph Hellwig | adadbee | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 1863 | bp->b_fspriv = iclog; |
Dave Chinner | 12877da | 2016-02-10 15:01:30 +1100 | [diff] [blame] | 1864 | bp->b_flags &= ~(XBF_FUA | XBF_FLUSH); |
Dave Chinner | b68c082 | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 1865 | bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE); |
Christoph Hellwig | 651701d | 2010-06-28 10:34:34 -0400 | [diff] [blame] | 1866 | |
Christoph Hellwig | a27a263 | 2011-06-16 12:02:23 +0000 | [diff] [blame] | 1867 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { |
Christoph Hellwig | e163cbd | 2011-07-08 14:36:36 +0200 | [diff] [blame] | 1868 | bp->b_flags |= XBF_FUA; |
| 1869 | |
Christoph Hellwig | a27a263 | 2011-06-16 12:02:23 +0000 | [diff] [blame] | 1870 | /* |
Christoph Hellwig | e163cbd | 2011-07-08 14:36:36 +0200 | [diff] [blame] | 1871 | * Flush the data device before flushing the log to make |
| 1872 | * sure all meta data written back from the AIL actually made |
| 1873 | * it to disk before stamping the new log tail LSN into the |
| 1874 | * log buffer. For an external log we need to issue the |
| 1875 | * flush explicitly, and unfortunately synchronously here; |
| 1876 | * for an internal log we can simply use the block layer |
| 1877 | * state machine for preflushes. |
Christoph Hellwig | a27a263 | 2011-06-16 12:02:23 +0000 | [diff] [blame] | 1878 | */ |
| 1879 | if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) |
| 1880 | xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); |
Christoph Hellwig | e163cbd | 2011-07-08 14:36:36 +0200 | [diff] [blame] | 1881 | else |
| 1882 | bp->b_flags |= XBF_FLUSH; |
Christoph Hellwig | a27a263 | 2011-06-16 12:02:23 +0000 | [diff] [blame] | 1883 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 | |
| 1885 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
| 1886 | ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); |
| 1887 | |
Ben Myers | 003fd6c | 2013-01-18 14:17:46 -0600 | [diff] [blame] | 1888 | xlog_verify_iclog(log, iclog, count, true); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1889 | |
| 1890 | /* account for log which doesn't start at block #0 */ |
| 1891 | XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); |
Dave Chinner | b68c082 | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 1892 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1893 | /* |
| 1894 | * Don't call xfs_bwrite here. We do log-syncs even when the filesystem |
| 1895 | * is shutting down. |
| 1896 | */ |
Christoph Hellwig | 901796a | 2011-10-10 16:52:49 +0000 | [diff] [blame] | 1897 | error = xlog_bdstrat(bp); |
| 1898 | if (error) { |
| 1899 | xfs_buf_ioerror_alert(bp, "xlog_sync"); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1900 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1901 | } |
| 1902 | if (split) { |
Nathan Scott | f5faad7 | 2006-07-28 17:04:44 +1000 | [diff] [blame] | 1903 | bp = iclog->ic_log->l_xbuf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1904 | XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ |
Chandra Seetharaman | 02fe03d | 2011-07-22 23:40:22 +0000 | [diff] [blame] | 1905 | xfs_buf_associate_memory(bp, |
| 1906 | (char *)&iclog->ic_header + count, split); |
Christoph Hellwig | adadbee | 2011-07-13 13:43:49 +0200 | [diff] [blame] | 1907 | bp->b_fspriv = iclog; |
Dave Chinner | 12877da | 2016-02-10 15:01:30 +1100 | [diff] [blame] | 1908 | bp->b_flags &= ~(XBF_FUA | XBF_FLUSH); |
Dave Chinner | b68c082 | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 1909 | bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE); |
Christoph Hellwig | f538d4d | 2005-11-02 10:26:59 +1100 | [diff] [blame] | 1910 | if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) |
Christoph Hellwig | e163cbd | 2011-07-08 14:36:36 +0200 | [diff] [blame] | 1911 | bp->b_flags |= XBF_FUA; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | |
| 1913 | ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); |
| 1914 | ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); |
| 1915 | |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 1916 | /* account for internal log which doesn't start at block #0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); |
Christoph Hellwig | 901796a | 2011-10-10 16:52:49 +0000 | [diff] [blame] | 1918 | error = xlog_bdstrat(bp); |
| 1919 | if (error) { |
| 1920 | xfs_buf_ioerror_alert(bp, "xlog_sync (split)"); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1921 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 | } |
| 1923 | } |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 1924 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1925 | } /* xlog_sync */ |
| 1926 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1927 | /* |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 1928 | * Deallocate a log structure |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1929 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 1930 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1931 | xlog_dealloc_log( |
| 1932 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1933 | { |
| 1934 | xlog_in_core_t *iclog, *next_iclog; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1935 | int i; |
| 1936 | |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1937 | xlog_cil_destroy(log); |
| 1938 | |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 1939 | /* |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1940 | * Cycle all the iclogbuf locks to make sure all log IO completion |
| 1941 | * is done before we tear down these buffers. |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 1942 | */ |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1943 | iclog = log->l_iclog; |
| 1944 | for (i = 0; i < log->l_iclog_bufs; i++) { |
| 1945 | xfs_buf_lock(iclog->ic_bp); |
| 1946 | xfs_buf_unlock(iclog->ic_bp); |
| 1947 | iclog = iclog->ic_next; |
| 1948 | } |
| 1949 | |
| 1950 | /* |
| 1951 | * Always need to ensure that the extra buffer does not point to memory |
| 1952 | * owned by another log buffer before we free it. Also, cycle the lock |
| 1953 | * first to ensure we've completed IO on it. |
| 1954 | */ |
| 1955 | xfs_buf_lock(log->l_xbuf); |
| 1956 | xfs_buf_unlock(log->l_xbuf); |
Dave Chinner | e70b73f | 2012-04-23 15:58:49 +1000 | [diff] [blame] | 1957 | xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); |
Dave Chinner | 4439647 | 2011-04-21 09:34:27 +0000 | [diff] [blame] | 1958 | xfs_buf_free(log->l_xbuf); |
| 1959 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | iclog = log->l_iclog; |
Dave Chinner | 9c23ecc | 2014-04-17 08:15:26 +1000 | [diff] [blame] | 1961 | for (i = 0; i < log->l_iclog_bufs; i++) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | xfs_buf_free(iclog->ic_bp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1963 | next_iclog = iclog->ic_next; |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 1964 | kmem_free(iclog); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1965 | iclog = next_iclog; |
| 1966 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1967 | spinlock_destroy(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1969 | log->l_mp->m_log = NULL; |
Denys Vlasenko | f0e2d93 | 2008-05-19 16:31:57 +1000 | [diff] [blame] | 1970 | kmem_free(log); |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 1971 | } /* xlog_dealloc_log */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1972 | |
| 1973 | /* |
| 1974 | * Update counters atomically now that memcpy is done. |
| 1975 | */ |
| 1976 | /* ARGSUSED */ |
| 1977 | static inline void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 1978 | xlog_state_finish_copy( |
| 1979 | struct xlog *log, |
| 1980 | struct xlog_in_core *iclog, |
| 1981 | int record_cnt, |
| 1982 | int copy_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1984 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1985 | |
Marcin Slusarz | 413d57c | 2008-02-13 15:03:29 -0800 | [diff] [blame] | 1986 | be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 | iclog->ic_offset += copy_bytes; |
| 1988 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 1989 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | } /* xlog_state_finish_copy */ |
| 1991 | |
| 1992 | |
| 1993 | |
| 1994 | |
| 1995 | /* |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 1996 | * print out info relating to regions written which consume |
| 1997 | * the reservation |
| 1998 | */ |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 1999 | void |
| 2000 | xlog_print_tic_res( |
| 2001 | struct xfs_mount *mp, |
| 2002 | struct xlog_ticket *ticket) |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2003 | { |
| 2004 | uint i; |
| 2005 | uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t); |
| 2006 | |
| 2007 | /* match with XLOG_REG_TYPE_* in xfs_log.h */ |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2008 | #define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str |
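| | /* Each REG_TYPE_STR() entry expands to a designated initializer, e.g. |
| |  * [XLOG_REG_TYPE_BFORMAT] = "bformat", so res_type_str[] can be |
| |  * indexed directly by region type below. */ |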
| 2009 | static char *res_type_str[XLOG_REG_TYPE_MAX + 1] = { |
| 2010 | REG_TYPE_STR(BFORMAT, "bformat"), |
| 2011 | REG_TYPE_STR(BCHUNK, "bchunk"), |
| 2012 | REG_TYPE_STR(EFI_FORMAT, "efi_format"), |
| 2013 | REG_TYPE_STR(EFD_FORMAT, "efd_format"), |
| 2014 | REG_TYPE_STR(IFORMAT, "iformat"), |
| 2015 | REG_TYPE_STR(ICORE, "icore"), |
| 2016 | REG_TYPE_STR(IEXT, "iext"), |
| 2017 | REG_TYPE_STR(IBROOT, "ibroot"), |
| 2018 | REG_TYPE_STR(ILOCAL, "ilocal"), |
| 2019 | REG_TYPE_STR(IATTR_EXT, "iattr_ext"), |
| 2020 | REG_TYPE_STR(IATTR_BROOT, "iattr_broot"), |
| 2021 | REG_TYPE_STR(IATTR_LOCAL, "iattr_local"), |
| 2022 | REG_TYPE_STR(QFORMAT, "qformat"), |
| 2023 | REG_TYPE_STR(DQUOT, "dquot"), |
| 2024 | REG_TYPE_STR(QUOTAOFF, "quotaoff"), |
| 2025 | REG_TYPE_STR(LRHEADER, "LR header"), |
| 2026 | REG_TYPE_STR(UNMOUNT, "unmount"), |
| 2027 | REG_TYPE_STR(COMMIT, "commit"), |
| 2028 | REG_TYPE_STR(TRANSHDR, "trans header"), |
| 2029 | REG_TYPE_STR(ICREATE, "inode create") |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2030 | }; |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2031 | #undef REG_TYPE_STR |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2032 | |
Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 2033 | xfs_warn(mp, "xlog_write: reservation summary:"); |
Joe Perches | f41febd | 2015-07-29 11:52:04 +1000 | [diff] [blame] | 2034 | xfs_warn(mp, " unit res = %d bytes", |
| 2035 | ticket->t_unit_res); |
| 2036 | xfs_warn(mp, " current res = %d bytes", |
| 2037 | ticket->t_curr_res); |
| 2038 | xfs_warn(mp, " total reg = %u bytes (o/flow = %u bytes)", |
| 2039 | ticket->t_res_arr_sum, ticket->t_res_o_flow); |
| 2040 | xfs_warn(mp, " ophdrs = %u (ophdr space = %u bytes)", |
| 2041 | ticket->t_res_num_ophdrs, ophdr_spc); |
| 2042 | xfs_warn(mp, " ophdr + reg = %u bytes", |
| 2043 | ticket->t_res_arr_sum + ticket->t_res_o_flow + ophdr_spc); |
| 2044 | xfs_warn(mp, " num regions = %u", |
| 2045 | ticket->t_res_num); |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2046 | |
| 2047 | for (i = 0; i < ticket->t_res_num; i++) { |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2048 | uint r_type = ticket->t_res_arr[i].r_type; |
Eric Sandeen | 08e96e1 | 2013-10-11 20:59:05 -0500 | [diff] [blame] | 2049 | xfs_warn(mp, "region[%u]: %s - %u bytes", i, |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2050 | ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? |
Darrick J. Wong | 5110cd8 | 2016-03-07 08:40:03 +1100 | [diff] [blame] | 2051 | "bad-rtype" : res_type_str[r_type]), |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2052 | ticket->t_res_arr[i].r_len); |
| 2053 | } |
Dave Chinner | 169a7b0 | 2010-05-07 11:05:31 +1000 | [diff] [blame] | 2054 | |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2055 | xfs_alert_tag(mp, XFS_PTAG_LOGRES, |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 2056 | "xlog_write: reservation ran out. Need to up reservation"); |
Chandra Seetharaman | 297aa63 | 2013-07-19 17:31:38 -0500 | [diff] [blame] | 2057 | xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2058 | } |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2059 | |
| 2060 | /* |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2061 | * Calculate the potential space needed by the log vector. Each region gets |
| 2062 | * its own xlog_op_header_t and may need to be double word aligned. |
| 2063 | */ |
| 2064 | static int |
| 2065 | xlog_write_calc_vec_length( |
| 2066 | struct xlog_ticket *ticket, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2067 | struct xfs_log_vec *log_vector) |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2068 | { |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2069 | struct xfs_log_vec *lv; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2070 | int headers = 0; |
| 2071 | int len = 0; |
| 2072 | int i; |
| 2073 | |
| 2074 | /* acct for start rec of xact */ |
| 2075 | if (ticket->t_flags & XLOG_TIC_INITED) |
| 2076 | headers++; |
| 2077 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2078 | for (lv = log_vector; lv; lv = lv->lv_next) { |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2079 | /* we don't write ordered log vectors */ |
| 2080 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) |
| 2081 | continue; |
| 2082 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2083 | headers += lv->lv_niovecs; |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2084 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2085 | for (i = 0; i < lv->lv_niovecs; i++) { |
| 2086 | struct xfs_log_iovec *vecp = &lv->lv_iovecp[i]; |
| 2087 | |
| 2088 | len += vecp->i_len; |
| 2089 | xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type); |
| 2090 | } |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2091 | } |
| 2092 | |
| 2093 | ticket->t_res_num_ophdrs += headers; |
| 2094 | len += headers * sizeof(struct xlog_op_header); |
| 2095 | |
| 2096 | return len; |
| 2097 | } |
| 2098 | |
| 2099 | /* |
| 2100 | * If first write for transaction, insert start record. We can't be trying to |
| 2101 | * commit if we are inited. We can't have any "partial_copy" if we are inited. |
| 2102 | */ |
| 2103 | static int |
| 2104 | xlog_write_start_rec( |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2105 | struct xlog_op_header *ophdr, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2106 | struct xlog_ticket *ticket) |
| 2107 | { |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2108 | if (!(ticket->t_flags & XLOG_TIC_INITED)) |
| 2109 | return 0; |
| 2110 | |
| 2111 | ophdr->oh_tid = cpu_to_be32(ticket->t_tid); |
| 2112 | ophdr->oh_clientid = ticket->t_clientid; |
| 2113 | ophdr->oh_len = 0; |
| 2114 | ophdr->oh_flags = XLOG_START_TRANS; |
| 2115 | ophdr->oh_res2 = 0; |
| 2116 | |
| 2117 | ticket->t_flags &= ~XLOG_TIC_INITED; |
| 2118 | |
| 2119 | return sizeof(struct xlog_op_header); |
| 2120 | } |
| 2121 | |
| 2122 | static xlog_op_header_t * |
| 2123 | xlog_write_setup_ophdr( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2124 | struct xlog *log, |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2125 | struct xlog_op_header *ophdr, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2126 | struct xlog_ticket *ticket, |
| 2127 | uint flags) |
| 2128 | { |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2129 | ophdr->oh_tid = cpu_to_be32(ticket->t_tid); |
| 2130 | ophdr->oh_clientid = ticket->t_clientid; |
| 2131 | ophdr->oh_res2 = 0; |
| 2132 | |
| 2133 | /* are we copying a commit or unmount record? */ |
| 2134 | ophdr->oh_flags = flags; |
| 2135 | |
| 2136 | /* |
| 2137 | * We've seen logs corrupted with bad transaction client ids. This |
| 2138 | * makes sure that XFS doesn't generate them. If we see one, turn it into an EIO |
| 2139 | * and shut down the filesystem. |
| 2140 | */ |
| 2141 | switch (ophdr->oh_clientid) { |
| 2142 | case XFS_TRANSACTION: |
| 2143 | case XFS_VOLUME: |
| 2144 | case XFS_LOG: |
| 2145 | break; |
| 2146 | default: |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2147 | xfs_warn(log->l_mp, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2148 | "Bad XFS transaction clientid 0x%x in ticket 0x%p", |
| 2149 | ophdr->oh_clientid, ticket); |
| 2150 | return NULL; |
| 2151 | } |
| 2152 | |
| 2153 | return ophdr; |
| 2154 | } |
| 2155 | |
| 2156 | /* |
| 2157 | * Set up the parameters of the region copy into the log. This has |
| 2158 | * to handle region write split across multiple log buffers - this |
| 2159 | * state is kept external to this function so that this code can |
Zhi Yong Wu | ac0e300 | 2013-08-07 10:11:02 +0000 | [diff] [blame] | 2160 | * be written in an obvious, self-documenting manner. |
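| | * |
| | * For illustration (hypothetical numbers): with 2000 bytes of a region |
| | * still to copy but only 512 bytes left in the current iclog, the |
| | * partial-copy path sets *copy_len to 512, marks the op header with |
| | * XLOG_CONTINUE_TRANS, charges the ticket for one extra op header and |
| | * returns sizeof(struct xlog_op_header). |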
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2161 | */ |
| 2162 | static int |
| 2163 | xlog_write_setup_copy( |
| 2164 | struct xlog_ticket *ticket, |
| 2165 | struct xlog_op_header *ophdr, |
| 2166 | int space_available, |
| 2167 | int space_required, |
| 2168 | int *copy_off, |
| 2169 | int *copy_len, |
| 2170 | int *last_was_partial_copy, |
| 2171 | int *bytes_consumed) |
| 2172 | { |
| 2173 | int still_to_copy; |
| 2174 | |
| 2175 | still_to_copy = space_required - *bytes_consumed; |
| 2176 | *copy_off = *bytes_consumed; |
| 2177 | |
| 2178 | if (still_to_copy <= space_available) { |
| 2179 | /* write of region completes here */ |
| 2180 | *copy_len = still_to_copy; |
| 2181 | ophdr->oh_len = cpu_to_be32(*copy_len); |
| 2182 | if (*last_was_partial_copy) |
| 2183 | ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); |
| 2184 | *last_was_partial_copy = 0; |
| 2185 | *bytes_consumed = 0; |
| 2186 | return 0; |
| 2187 | } |
| 2188 | |
| 2189 | /* partial write of region, needs extra log op header reservation */ |
| 2190 | *copy_len = space_available; |
| 2191 | ophdr->oh_len = cpu_to_be32(*copy_len); |
| 2192 | ophdr->oh_flags |= XLOG_CONTINUE_TRANS; |
| 2193 | if (*last_was_partial_copy) |
| 2194 | ophdr->oh_flags |= XLOG_WAS_CONT_TRANS; |
| 2195 | *bytes_consumed += *copy_len; |
| 2196 | (*last_was_partial_copy)++; |
| 2197 | |
| 2198 | /* account for new log op header */ |
| 2199 | ticket->t_curr_res -= sizeof(struct xlog_op_header); |
| 2200 | ticket->t_res_num_ophdrs++; |
| 2201 | |
| 2202 | return sizeof(struct xlog_op_header); |
| 2203 | } |
| 2204 | |
| 2205 | static int |
| 2206 | xlog_write_copy_finish( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2207 | struct xlog *log, |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2208 | struct xlog_in_core *iclog, |
| 2209 | uint flags, |
| 2210 | int *record_cnt, |
| 2211 | int *data_cnt, |
| 2212 | int *partial_copy, |
| 2213 | int *partial_copy_len, |
| 2214 | int log_offset, |
| 2215 | struct xlog_in_core **commit_iclog) |
| 2216 | { |
| 2217 | if (*partial_copy) { |
| 2218 | /* |
| 2219 | * This iclog has already been marked WANT_SYNC by |
| 2220 | * xlog_state_get_iclog_space. |
| 2221 | */ |
| 2222 | xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); |
| 2223 | *record_cnt = 0; |
| 2224 | *data_cnt = 0; |
| 2225 | return xlog_state_release_iclog(log, iclog); |
| 2226 | } |
| 2227 | |
| 2228 | *partial_copy = 0; |
| 2229 | *partial_copy_len = 0; |
| 2230 | |
| 2231 | if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { |
| 2232 | /* no more space in this iclog - push it. */ |
| 2233 | xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); |
| 2234 | *record_cnt = 0; |
| 2235 | *data_cnt = 0; |
| 2236 | |
| 2237 | spin_lock(&log->l_icloglock); |
| 2238 | xlog_state_want_sync(log, iclog); |
| 2239 | spin_unlock(&log->l_icloglock); |
| 2240 | |
| 2241 | if (!commit_iclog) |
| 2242 | return xlog_state_release_iclog(log, iclog); |
| 2243 | ASSERT(flags & XLOG_COMMIT_TRANS); |
| 2244 | *commit_iclog = iclog; |
| 2245 | } |
| 2246 | |
| 2247 | return 0; |
| 2248 | } |
| 2249 | |
| 2250 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2251 | * Write some region out to in-core log |
| 2252 | * |
| 2253 | * This will be called when writing externally provided regions or when |
| 2254 | * writing out a commit record for a given transaction. |
| 2255 | * |
| 2256 | * General algorithm: |
| 2257 | * 1. Find total length of this write. This may include adding to the |
| 2258 | * lengths passed in. |
| 2259 | * 2. Check whether we violate the tickets reservation. |
| 2260 | * 3. While writing to this iclog |
| 2261 | * A. Reserve as much space in this iclog as we can get |
| 2262 | * B. If this is first write, save away start lsn |
| 2263 | * C. While writing this region: |
| 2264 | * 1. If first write of transaction, write start record |
| 2265 | * 2. Write log operation header (header per region) |
| 2266 | * 3. Find out if we can fit entire region into this iclog |
| 2267 | * 4. Potentially, verify destination memcpy ptr |
| 2268 | * 5. Memcpy (partial) region |
| 2269 | * 6. If partial copy, release iclog; otherwise, continue |
| 2270 | * copying more regions into current iclog |
| 2271 | * 4. Mark want sync bit (in simulation mode) |
| 2272 | * 5. Release iclog for potential flush to on-disk log. |
| 2273 | * |
| 2274 | * ERRORS: |
| 2275 | * 1. Panic if reservation is overrun. This should never happen since |
| 2276 | * reservation amounts are generated internally by the filesystem. |
| 2277 | * NOTES: |
| 2278 | * 1. Tickets are single threaded data structures. |
| 2279 | * 2. The XLOG_END_TRANS & XLOG_CONTINUE_TRANS flags are passed down to the |
| 2280 | * syncing routine. When a single log_write region needs to span |
| 2281 | * multiple in-core logs, the XLOG_CONTINUE_TRANS bit should be set |
| 2282 | * on all log operation writes which don't contain the end of the |
| 2283 | * region. The XLOG_END_TRANS bit is used for the in-core log |
| 2284 | * operation which contains the end of the continued log_write region. |
| 2285 | * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog, |
| 2286 | * we don't really know exactly how much space will be used. As a result, |
| 2287 | * we don't update ic_offset until the end when we know exactly how many |
| 2288 | * bytes have been written out. |
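| | * |
| | * In this version of the code, xlog_write() is normally reached through |
| | * the CIL push path (xlog_cil_push()) and for commit/unmount records, |
| | * so the log vectors passed in are usually already formatted by the CIL. |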
| 2289 | */ |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2290 | int |
Christoph Hellwig | 35a8a72 | 2010-02-15 23:34:54 +0000 | [diff] [blame] | 2291 | xlog_write( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 2292 | struct xlog *log, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2293 | struct xfs_log_vec *log_vector, |
Christoph Hellwig | 35a8a72 | 2010-02-15 23:34:54 +0000 | [diff] [blame] | 2294 | struct xlog_ticket *ticket, |
| 2295 | xfs_lsn_t *start_lsn, |
| 2296 | struct xlog_in_core **commit_iclog, |
| 2297 | uint flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2298 | { |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2299 | struct xlog_in_core *iclog = NULL; |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2300 | struct xfs_log_iovec *vecp; |
| 2301 | struct xfs_log_vec *lv; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2302 | int len; |
| 2303 | int index; |
| 2304 | int partial_copy = 0; |
| 2305 | int partial_copy_len = 0; |
| 2306 | int contwr = 0; |
| 2307 | int record_cnt = 0; |
| 2308 | int data_cnt = 0; |
| 2309 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2310 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2311 | *start_lsn = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2312 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2313 | len = xlog_write_calc_vec_length(ticket, log_vector); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2314 | |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 2315 | /* |
| 2316 | * Region headers and bytes are already accounted for. |
| 2317 | * We only need to take into account start records and |
| 2318 | * split regions in this function. |
| 2319 | */ |
| 2320 | if (ticket->t_flags & XLOG_TIC_INITED) |
| 2321 | ticket->t_curr_res -= sizeof(xlog_op_header_t); |
| 2322 | |
| 2323 | /* |
| 2324 | * Commit record headers need to be accounted for. These |
| 2325 | * come in as separate writes so are easy to detect. |
| 2326 | */ |
| 2327 | if (flags & (XLOG_COMMIT_TRANS | XLOG_UNMOUNT_TRANS)) |
| 2328 | ticket->t_curr_res -= sizeof(xlog_op_header_t); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 2329 | |
| 2330 | if (ticket->t_curr_res < 0) |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2331 | xlog_print_tic_res(log->l_mp, ticket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2332 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2333 | index = 0; |
| 2334 | lv = log_vector; |
| 2335 | vecp = lv->lv_iovecp; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2336 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2337 | void *ptr; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2338 | int log_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2339 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2340 | error = xlog_state_get_iclog_space(log, len, &iclog, ticket, |
| 2341 | &contwr, &log_offset); |
| 2342 | if (error) |
| 2343 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2344 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2345 | ASSERT(log_offset <= iclog->ic_size - 1); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2346 | ptr = iclog->ic_datap + log_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2347 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2348 | /* start_lsn is the first lsn written to. That's all we need. */ |
| 2349 | if (!*start_lsn) |
| 2350 | *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2352 | /* |
| 2353 | * This loop writes out as many regions as can fit in the amount |
| 2354 | * of space which was allocated by xlog_state_get_iclog_space(). |
| 2355 | */ |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2356 | while (lv && (!lv->lv_niovecs || index < lv->lv_niovecs)) { |
| 2357 | struct xfs_log_iovec *reg; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2358 | struct xlog_op_header *ophdr; |
| 2359 | int start_rec_copy; |
| 2360 | int copy_len; |
| 2361 | int copy_off; |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2362 | bool ordered = false; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2363 | |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2364 | /* ordered log vectors have no regions to write */ |
| 2365 | if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) { |
| 2366 | ASSERT(lv->lv_niovecs == 0); |
| 2367 | ordered = true; |
| 2368 | goto next_lv; |
| 2369 | } |
| 2370 | |
| 2371 | reg = &vecp[index]; |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2372 | ASSERT(reg->i_len % sizeof(__int32_t) == 0); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2373 | ASSERT((unsigned long)ptr % sizeof(__int32_t) == 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2374 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2375 | start_rec_copy = xlog_write_start_rec(ptr, ticket); |
| 2376 | if (start_rec_copy) { |
| 2377 | record_cnt++; |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2378 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2379 | start_rec_copy); |
| 2380 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2381 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2382 | ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); |
| 2383 | if (!ophdr) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2384 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2385 | |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 2386 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2387 | sizeof(struct xlog_op_header)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2388 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2389 | len += xlog_write_setup_copy(ticket, ophdr, |
| 2390 | iclog->ic_size-log_offset, |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2391 | reg->i_len, |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2392 | ©_off, ©_len, |
| 2393 | &partial_copy, |
| 2394 | &partial_copy_len); |
| 2395 | xlog_verify_dest_ptr(log, ptr); |
Dave Chinner | b5203cd | 2010-03-23 11:29:44 +1100 | [diff] [blame] | 2396 | |
Eric Sandeen | 91f9f5f | 2015-10-12 16:04:15 +1100 | [diff] [blame] | 2397 | /* |
| 2398 | * Copy region. |
| 2399 | * |
| 2400 | * Unmount records just log an opheader, so can have |
| 2401 | * empty payloads with no data region to copy. Hence we |
| 2402 | * only copy the payload if the vector says it has data |
| 2403 | * to copy. |
| 2404 | */ |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2405 | ASSERT(copy_len >= 0); |
Eric Sandeen | 91f9f5f | 2015-10-12 16:04:15 +1100 | [diff] [blame] | 2406 | if (copy_len > 0) { |
| 2407 | memcpy(ptr, reg->i_addr + copy_off, copy_len); |
| 2408 | xlog_write_adv_cnt(&ptr, &len, &log_offset, |
| 2409 | copy_len); |
| 2410 | } |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2411 | copy_len += start_rec_copy + sizeof(xlog_op_header_t); |
| 2412 | record_cnt++; |
| 2413 | data_cnt += contwr ? copy_len : 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2414 | |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2415 | error = xlog_write_copy_finish(log, iclog, flags, |
| 2416 | &record_cnt, &data_cnt, |
| 2417 | &partial_copy, |
| 2418 | &partial_copy_len, |
| 2419 | log_offset, |
| 2420 | commit_iclog); |
| 2421 | if (error) |
| 2422 | return error; |
| 2423 | |
| 2424 | /* |
| 2425 | * if we had a partial copy, we need to get more iclog |
| 2426 | * space but we don't want to increment the region |
| 2427 | * index because there is still more in this region to |
| 2428 | * write. |
| 2429 | * |
| 2430 | * If we completed writing this region, and we flushed |
| 2431 | * the iclog (indicated by resetting of the record |
| 2432 | * count), then we also need to get more log space. If |
| 2433 | * this was the last record, though, we are done and |
| 2434 | * can just return. |
| 2435 | */ |
| 2436 | if (partial_copy) |
| 2437 | break; |
| 2438 | |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2439 | if (++index == lv->lv_niovecs) { |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2440 | next_lv: |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2441 | lv = lv->lv_next; |
| 2442 | index = 0; |
| 2443 | if (lv) |
| 2444 | vecp = lv->lv_iovecp; |
| 2445 | } |
Dave Chinner | fd63875 | 2013-06-27 16:04:51 +1000 | [diff] [blame] | 2446 | if (record_cnt == 0 && ordered == false) { |
Dave Chinner | 55b6633 | 2010-03-23 11:43:17 +1100 | [diff] [blame] | 2447 | if (!lv) |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2448 | return 0; |
| 2449 | break; |
| 2450 | } |
| 2451 | } |
| 2452 | } |
| 2453 | |
| 2454 | ASSERT(len == 0); |
| 2455 | |
| 2456 | xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); |
| 2457 | if (!commit_iclog) |
| 2458 | return xlog_state_release_iclog(log, iclog); |
| 2459 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2460 | ASSERT(flags & XLOG_COMMIT_TRANS); |
| 2461 | *commit_iclog = iclog; |
| 2462 | return 0; |
Christoph Hellwig | 99428ad | 2010-03-23 11:35:45 +1100 | [diff] [blame] | 2463 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2464 | |
| 2465 | |
| 2466 | /***************************************************************************** |
| 2467 | * |
| 2468 | * State Machine functions |
| 2469 | * |
| 2470 | ***************************************************************************** |
| 2471 | */ |
| 2472 | |
| 2473 | /* Clean iclogs starting from the head. This ordering must be |
| 2474 | * maintained, so an iclog doesn't become ACTIVE beyond one that |
| 2475 | * is SYNCING. This is also required to maintain the notion that we use |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 2476 | * an ordered wait queue to hold off would-be writers to the log when every |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2477 | * iclog is trying to sync to disk. |
| 2478 | * |
| 2479 | * State Change: DIRTY -> ACTIVE |
| 2480 | */ |
Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 2481 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2482 | xlog_state_clean_log( |
| 2483 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2484 | { |
| 2485 | xlog_in_core_t *iclog; |
| 2486 | int changed = 0; |
| 2487 | |
| 2488 | iclog = log->l_iclog; |
| 2489 | do { |
| 2490 | if (iclog->ic_state == XLOG_STATE_DIRTY) { |
| 2491 | iclog->ic_state = XLOG_STATE_ACTIVE; |
| 2492 | iclog->ic_offset = 0; |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2493 | ASSERT(iclog->ic_callback == NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2494 | /* |
| 2495 | * If the number of ops in this iclog indicates it just |
| 2496 | * contains the dummy transaction, we can |
| 2497 | * change state into IDLE (the second time around). |
| 2498 | * Otherwise we should change the state into |
| 2499 | * NEED a dummy. |
| 2500 | * We don't need to cover the dummy. |
| 2501 | */ |
| 2502 | if (!changed && |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2503 | (be32_to_cpu(iclog->ic_header.h_num_logops) == |
| 2504 | XLOG_COVER_OPS)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2505 | changed = 1; |
| 2506 | } else { |
| 2507 | /* |
| 2508 | * We have two dirty iclogs so start over. |
| 2509 | * This could also mean the number of ops |
| 2510 | * indicates this is not the dummy going out. |
| 2511 | */ |
| 2512 | changed = 2; |
| 2513 | } |
| 2514 | iclog->ic_header.h_num_logops = 0; |
| 2515 | memset(iclog->ic_header.h_cycle_data, 0, |
| 2516 | sizeof(iclog->ic_header.h_cycle_data)); |
| 2517 | iclog->ic_header.h_lsn = 0; |
| 2518 | } else if (iclog->ic_state == XLOG_STATE_ACTIVE) |
| 2519 | /* do nothing */; |
| 2520 | else |
| 2521 | break; /* stop cleaning */ |
| 2522 | iclog = iclog->ic_next; |
| 2523 | } while (iclog != log->l_iclog); |
| 2524 | |
| 2525 | /* log is locked when we are called */ |
| 2526 | /* |
| 2527 | * Change state for the dummy log recording. |
| 2528 | * We usually go to NEED. But we go to NEED2 if the change indicates |
| 2529 | * we are done writing the dummy record. |
| 2530 | * If we are done with the second dummy record (DONE2), then |
| 2531 | * we go to IDLE. |
| 2532 | */ |
| 2533 | if (changed) { |
| 2534 | switch (log->l_covered_state) { |
| 2535 | case XLOG_STATE_COVER_IDLE: |
| 2536 | case XLOG_STATE_COVER_NEED: |
| 2537 | case XLOG_STATE_COVER_NEED2: |
| 2538 | log->l_covered_state = XLOG_STATE_COVER_NEED; |
| 2539 | break; |
| 2540 | |
| 2541 | case XLOG_STATE_COVER_DONE: |
| 2542 | if (changed == 1) |
| 2543 | log->l_covered_state = XLOG_STATE_COVER_NEED2; |
| 2544 | else |
| 2545 | log->l_covered_state = XLOG_STATE_COVER_NEED; |
| 2546 | break; |
| 2547 | |
| 2548 | case XLOG_STATE_COVER_DONE2: |
| 2549 | if (changed == 1) |
| 2550 | log->l_covered_state = XLOG_STATE_COVER_IDLE; |
| 2551 | else |
| 2552 | log->l_covered_state = XLOG_STATE_COVER_NEED; |
| 2553 | break; |
| 2554 | |
| 2555 | default: |
| 2556 | ASSERT(0); |
| 2557 | } |
| 2558 | } |
| 2559 | } /* xlog_state_clean_log */ |
| 2560 | |
| 2561 | STATIC xfs_lsn_t |
| 2562 | xlog_get_lowest_lsn( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2563 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2564 | { |
| 2565 | xlog_in_core_t *lsn_log; |
| 2566 | xfs_lsn_t lowest_lsn, lsn; |
| 2567 | |
| 2568 | lsn_log = log->l_iclog; |
| 2569 | lowest_lsn = 0; |
| 2570 | do { |
| 2571 | if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2572 | lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2573 | if ((lsn && !lowest_lsn) || |
| 2574 | (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { |
| 2575 | lowest_lsn = lsn; |
| 2576 | } |
| 2577 | } |
| 2578 | lsn_log = lsn_log->ic_next; |
| 2579 | } while (lsn_log != log->l_iclog); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 2580 | return lowest_lsn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2581 | } |
| 2582 | |
| 2583 | |
| 2584 | STATIC void |
| 2585 | xlog_state_do_callback( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2586 | struct xlog *log, |
| 2587 | int aborted, |
| 2588 | struct xlog_in_core *ciclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2589 | { |
| 2590 | xlog_in_core_t *iclog; |
| 2591 | xlog_in_core_t *first_iclog; /* used to know when we've |
| 2592 | * processed all iclogs once */ |
| 2593 | xfs_log_callback_t *cb, *cb_next; |
| 2594 | int flushcnt = 0; |
| 2595 | xfs_lsn_t lowest_lsn; |
| 2596 | int ioerrors; /* counter: iclogs with errors */ |
| 2597 | int loopdidcallbacks; /* flag: inner loop did callbacks*/ |
| 2598 | int funcdidcallbacks; /* flag: function did callbacks */ |
| 2599 | int repeats; /* for issuing console warnings if |
| 2600 | * looping too many times */ |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2601 | int wake = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2602 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2603 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2604 | first_iclog = iclog = log->l_iclog; |
| 2605 | ioerrors = 0; |
| 2606 | funcdidcallbacks = 0; |
| 2607 | repeats = 0; |
| 2608 | |
| 2609 | do { |
| 2610 | /* |
| 2611 | * Scan all iclogs starting with the one pointed to by the |
| 2612 | * log. Reset this starting point each time the log is |
| 2613 | * unlocked (during callbacks). |
| 2614 | * |
| 2615 | * Keep looping through iclogs until one full pass is made |
| 2616 | * without running any callbacks. |
| 2617 | */ |
| 2618 | first_iclog = log->l_iclog; |
| 2619 | iclog = log->l_iclog; |
| 2620 | loopdidcallbacks = 0; |
| 2621 | repeats++; |
| 2622 | |
| 2623 | do { |
| 2624 | |
| 2625 | /* skip all iclogs in the ACTIVE & DIRTY states */ |
| 2626 | if (iclog->ic_state & |
| 2627 | (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY)) { |
| 2628 | iclog = iclog->ic_next; |
| 2629 | continue; |
| 2630 | } |
| 2631 | |
| 2632 | /* |
| 2633 | * Between marking a filesystem SHUTDOWN and stopping |
| 2634 | * the log, we do flush all iclogs to disk (if there |
| 2635 | * wasn't a log I/O error). So, we do want things to |
| 2636 | * go smoothly in case of just a SHUTDOWN w/o a |
| 2637 | * LOG_IO_ERROR. |
| 2638 | */ |
| 2639 | if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { |
| 2640 | /* |
| 2641 | * Can only perform callbacks in order. Since |
| 2642 | * this iclog is not in the DONE_SYNC/ |
| 2643 | * DO_CALLBACK state, we skip the rest and |
| 2644 | * just try to clean up. If we set our iclog |
| 2645 | * to DO_CALLBACK, we will not process it when |
| 2646 | * we retry since a previous iclog is in the |
| 2647 | * CALLBACK and the state cannot change since |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2648 | * we are holding the l_icloglock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2649 | */ |
| 2650 | if (!(iclog->ic_state & |
| 2651 | (XLOG_STATE_DONE_SYNC | |
| 2652 | XLOG_STATE_DO_CALLBACK))) { |
| 2653 | if (ciclog && (ciclog->ic_state == |
| 2654 | XLOG_STATE_DONE_SYNC)) { |
| 2655 | ciclog->ic_state = XLOG_STATE_DO_CALLBACK; |
| 2656 | } |
| 2657 | break; |
| 2658 | } |
| 2659 | /* |
| 2660 | * We now have an iclog that is in either the |
| 2661 | * DO_CALLBACK or DONE_SYNC states. The other |
| 2662 | * states (WANT_SYNC, SYNCING, or CALLBACK) were |
| 2663 | * caught by the above if and will be cleaned |
| 2664 | * up elsewhere (i.e. we aren't doing their |
| 2665 | * callbacks here). |
| 2666 | */ |
| 2667 | |
| 2668 | /* |
| 2669 | * We will do one more check here to see if we |
| 2670 | * have chased our tail around. |
| 2671 | */ |
| 2672 | |
| 2673 | lowest_lsn = xlog_get_lowest_lsn(log); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2674 | if (lowest_lsn && |
| 2675 | XFS_LSN_CMP(lowest_lsn, |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2676 | be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2677 | iclog = iclog->ic_next; |
| 2678 | continue; /* Leave this iclog for |
| 2679 | * another thread */ |
| 2680 | } |
| 2681 | |
| 2682 | iclog->ic_state = XLOG_STATE_CALLBACK; |
| 2683 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2684 | |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2685 | /* |
Dave Chinner | d35e88f | 2012-10-08 21:56:12 +1100 | [diff] [blame] | 2686 | * Completion of an iclog IO does not imply that |
| 2687 | * a transaction has completed, as transactions |
| 2688 | * can be large enough to span many iclogs. We |
| 2689 | * cannot change the tail of the log half way |
| 2690 | * through a transaction as this may be the only |
| 2691 | * transaction in the log and moving the tail to |
| 2692 | * point to the middle of it will prevent |
| 2693 | * recovery from finding the start of the |
| 2694 | * transaction. Hence we should only update the |
| 2695 | * last_sync_lsn if this iclog contains |
| 2696 | * transaction completion callbacks on it. |
| 2697 | * |
| 2698 | * We have to do this before we drop the |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2699 | * icloglock to ensure we are the only one that |
| 2700 | * can update it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2701 | */ |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2702 | ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), |
| 2703 | be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); |
Dave Chinner | d35e88f | 2012-10-08 21:56:12 +1100 | [diff] [blame] | 2704 | if (iclog->ic_callback) |
| 2705 | atomic64_set(&log->l_last_sync_lsn, |
| 2706 | be64_to_cpu(iclog->ic_header.h_lsn)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2707 | |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2708 | } else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2709 | ioerrors++; |
Dave Chinner | 84f3c68 | 2010-12-03 22:11:29 +1100 | [diff] [blame] | 2710 | |
| 2711 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2712 | |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2713 | /* |
| 2714 | * Keep processing entries in the callback list until |
| 2715 | * we come around and it is empty. We need to |
| 2716 | * atomically see that the list is empty and change the |
| 2717 | * state to DIRTY so that we don't miss any more |
| 2718 | * callbacks being added. |
| 2719 | */ |
| 2720 | spin_lock(&iclog->ic_callback_lock); |
| 2721 | cb = iclog->ic_callback; |
Christoph Hellwig | 4b80916 | 2007-08-16 15:37:36 +1000 | [diff] [blame] | 2722 | while (cb) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2723 | iclog->ic_callback_tail = &(iclog->ic_callback); |
| 2724 | iclog->ic_callback = NULL; |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2725 | spin_unlock(&iclog->ic_callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2726 | |
| 2727 | /* perform callbacks in the order given */ |
Christoph Hellwig | 4b80916 | 2007-08-16 15:37:36 +1000 | [diff] [blame] | 2728 | for (; cb; cb = cb_next) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2729 | cb_next = cb->cb_next; |
| 2730 | cb->cb_func(cb->cb_arg, aborted); |
| 2731 | } |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2732 | spin_lock(&iclog->ic_callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2733 | cb = iclog->ic_callback; |
| 2734 | } |
| 2735 | |
| 2736 | loopdidcallbacks++; |
| 2737 | funcdidcallbacks++; |
| 2738 | |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2739 | spin_lock(&log->l_icloglock); |
Christoph Hellwig | 4b80916 | 2007-08-16 15:37:36 +1000 | [diff] [blame] | 2740 | ASSERT(iclog->ic_callback == NULL); |
David Chinner | 114d23a | 2008-04-10 12:18:39 +1000 | [diff] [blame] | 2741 | spin_unlock(&iclog->ic_callback_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2742 | if (!(iclog->ic_state & XLOG_STATE_IOERROR)) |
| 2743 | iclog->ic_state = XLOG_STATE_DIRTY; |
| 2744 | |
| 2745 | /* |
| 2746 | * Transition from DIRTY to ACTIVE if applicable. |
| 2747 | * NOP if STATE_IOERROR. |
| 2748 | */ |
| 2749 | xlog_state_clean_log(log); |
| 2750 | |
| 2751 | /* wake up threads waiting in xfs_log_force() */ |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2752 | wake_up_all(&iclog->ic_force_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2753 | |
| 2754 | iclog = iclog->ic_next; |
| 2755 | } while (first_iclog != iclog); |
Nathan Scott | a3c6685e | 2006-09-28 11:02:14 +1000 | [diff] [blame] | 2756 | |
| 2757 | if (repeats > 5000) { |
| 2758 | flushcnt += repeats; |
| 2759 | repeats = 0; |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 2760 | xfs_warn(log->l_mp, |
Nathan Scott | a3c6685e | 2006-09-28 11:02:14 +1000 | [diff] [blame] | 2761 | "%s: possible infinite loop (%d iterations)", |
Harvey Harrison | 34a622b | 2008-04-10 12:19:21 +1000 | [diff] [blame] | 2762 | __func__, flushcnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2763 | } |
| 2764 | } while (!ioerrors && loopdidcallbacks); |
| 2765 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2766 | #ifdef DEBUG |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 2767 | /* |
| 2768 | * Make one last gasp attempt to see if iclogs are being left in limbo. |
| 2769 | * If the above loop finds an iclog earlier than the current iclog and |
| 2770 | * in one of the syncing states, the current iclog is put into |
| 2771 | * DO_CALLBACK and the callbacks are deferred to the completion of the |
| 2772 | * earlier iclog. Walk the iclogs in order and make sure that no iclog |
| 2773 | * is in DO_CALLBACK unless an earlier iclog is in one of the syncing |
| 2774 | * states. |
| 2775 | * |
| 2776 | * Note that SYNCING|IOABORT is a valid state so we cannot just check |
| 2777 | * for ic_state == SYNCING. |
| 2778 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2779 | if (funcdidcallbacks) { |
| 2780 | first_iclog = iclog = log->l_iclog; |
| 2781 | do { |
| 2782 | ASSERT(iclog->ic_state != XLOG_STATE_DO_CALLBACK); |
| 2783 | /* |
| 2784 | * Terminate the loop if iclogs are found in states |
| 2785 | * which will cause other threads to clean up iclogs. |
| 2786 | * |
| 2787 | * SYNCING - i/o completion will go through logs |
| 2788 | * DONE_SYNC - interrupt thread should be waiting for |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2789 | * l_icloglock |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2790 | * IOERROR - give up hope all ye who enter here |
| 2791 | */ |
| 2792 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC || |
Brian Foster | 609adfc | 2016-01-05 07:41:16 +1100 | [diff] [blame] | 2793 | iclog->ic_state & XLOG_STATE_SYNCING || |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2794 | iclog->ic_state == XLOG_STATE_DONE_SYNC || |
| 2795 | iclog->ic_state == XLOG_STATE_IOERROR ) |
| 2796 | break; |
| 2797 | iclog = iclog->ic_next; |
| 2798 | } while (first_iclog != iclog); |
| 2799 | } |
| 2800 | #endif |
| 2801 | |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2802 | if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) |
| 2803 | wake = 1; |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2804 | spin_unlock(&log->l_icloglock); |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2805 | |
| 2806 | if (wake) |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2807 | wake_up_all(&log->l_flush_wait); |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2808 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2809 | |
| 2810 | |
| 2811 | /* |
| 2812 | * Finish transitioning this iclog to the dirty state. |
| 2813 | * |
| 2814 | * Make sure that we completely execute this routine only when this is |
| 2815 | * the last call to the iclog. There is a good chance that iclog flushes, |
| 2816 | * when we reach the end of the physical log, get turned into 2 separate |
| 2817 | * calls to bwrite. Hence, one iclog flush could generate two calls to this |
| 2818 | * routine. By using the reference count bwritecnt, we guarantee that only |
| 2819 | * the second completion goes through. |
| 2820 | * |
| 2821 | * Callbacks could take time, so they are done outside the scope of the |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 2822 | * global state machine log lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2823 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 2824 | STATIC void |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2825 | xlog_state_done_syncing( |
| 2826 | xlog_in_core_t *iclog, |
| 2827 | int aborted) |
| 2828 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2829 | struct xlog *log = iclog->ic_log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2830 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2831 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2832 | |
| 2833 | ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || |
| 2834 | iclog->ic_state == XLOG_STATE_IOERROR); |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 2835 | ASSERT(atomic_read(&iclog->ic_refcnt) == 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2836 | ASSERT(iclog->ic_bwritecnt == 1 || iclog->ic_bwritecnt == 2); |
| 2837 | |
| 2838 | |
| 2839 | /* |
| 2840 | * If we got an error, either on the first buffer, or in the case of |
| 2841 | * split log writes, on the second, we mark ALL iclogs STATE_IOERROR, |
| 2842 | * and none should ever be attempted to be written to disk |
| 2843 | * again. |
| 2844 | */ |
| 2845 | if (iclog->ic_state != XLOG_STATE_IOERROR) { |
| 2846 | if (--iclog->ic_bwritecnt == 1) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2847 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2848 | return; |
| 2849 | } |
| 2850 | iclog->ic_state = XLOG_STATE_DONE_SYNC; |
| 2851 | } |
| 2852 | |
| 2853 | /* |
| 2854 | * Someone could be sleeping prior to writing out the next |
| 2855 | * iclog buffer, so we wake them all. One will get to do the |
| 2856 | * I/O; the others get to wait for the result. |
| 2857 | */ |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2858 | wake_up_all(&iclog->ic_write_wait); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2859 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2860 | xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ |
| 2861 | } /* xlog_state_done_syncing */ |
| 2862 | |
| 2863 | |
| 2864 | /* |
| 2865 | * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 2866 | * sleep. We wait on the flush queue on the head iclog as that should be |
| 2867 | * the first iclog to complete flushing. Hence if all iclogs are syncing, |
| 2868 | * we will wait here and all new writes will sleep until a sync completes. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2869 | * |
| 2870 | * The in-core logs are used in a circular fashion. They are not used |
| 2871 | * out-of-order even when an iclog past the head is free. |
| 2872 | * |
| 2873 | * return: |
| 2874 | * * log_offset where xlog_write() can start writing into the in-core |
| 2875 | * log's data space. |
| 2876 | * * in-core log pointer to which xlog_write() should write. |
| 2877 | * * boolean indicating this is a continued write to an in-core log. |
| 2878 | * If this is the last write, then the in-core log's offset field |
| 2879 | * needs to be incremented, depending on the amount of data which |
| 2880 | * is copied. |
| 2881 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 2882 | STATIC int |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2883 | xlog_state_get_iclog_space( |
| 2884 | struct xlog *log, |
| 2885 | int len, |
| 2886 | struct xlog_in_core **iclogp, |
| 2887 | struct xlog_ticket *ticket, |
| 2888 | int *continued_write, |
| 2889 | int *logoffsetp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2890 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2891 | int log_offset; |
| 2892 | xlog_rec_header_t *head; |
| 2893 | xlog_in_core_t *iclog; |
| 2894 | int error; |
| 2895 | |
| 2896 | restart: |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2897 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2898 | if (XLOG_FORCED_SHUTDOWN(log)) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2899 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 2900 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2901 | } |
| 2902 | |
| 2903 | iclog = log->l_iclog; |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2904 | if (iclog->ic_state != XLOG_STATE_ACTIVE) { |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 2905 | XFS_STATS_INC(log->l_mp, xs_log_noiclogs); |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2906 | |
| 2907 | /* Wait for log writes to have flushed */ |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 2908 | xlog_wait(&log->l_flush_wait, &log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2909 | goto restart; |
| 2910 | } |
Matthew Wilcox | d748c62 | 2008-05-19 16:34:27 +1000 | [diff] [blame] | 2911 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2912 | head = &iclog->ic_header; |
| 2913 | |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 2914 | atomic_inc(&iclog->ic_refcnt); /* prevents sync */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | log_offset = iclog->ic_offset; |
| 2916 | |
| 2917 | /* On the 1st write to an iclog, figure out lsn. This works |
| 2918 | * if iclogs marked XLOG_STATE_WANT_SYNC always write out what they are |
| 2919 | * committing to. If the offset is set, that's how many blocks |
| 2920 | * must be written. |
| 2921 | */ |
| 2922 | if (log_offset == 0) { |
| 2923 | ticket->t_curr_res -= log->l_iclog_hsize; |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 2924 | xlog_tic_add_region(ticket, |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 2925 | log->l_iclog_hsize, |
| 2926 | XLOG_REG_TYPE_LRHEADER); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 2927 | head->h_cycle = cpu_to_be32(log->l_curr_cycle); |
| 2928 | head->h_lsn = cpu_to_be64( |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 2929 | xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2930 | ASSERT(log->l_curr_block >= 0); |
| 2931 | } |
| 2932 | |
| 2933 | /* If there is enough room to write everything, then do it. Otherwise, |
| 2934 | * claim the rest of the region and make sure the XLOG_STATE_WANT_SYNC |
| 2935 | * bit is on, so this will get flushed out. Don't update ic_offset |
| 2936 | * until you know exactly how many bytes get copied. Therefore, wait |
| 2937 | * until later to update ic_offset. |
| 2938 | * |
| 2939 | * xlog_write() algorithm assumes that at least 2 xlog_op_header_t's |
| 2940 | * can fit into remaining data section. |
| 2941 | */ |
| 2942 | if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { |
| 2943 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); |
| 2944 | |
Dave Chinner | 49641f1 | 2008-07-11 17:43:55 +1000 | [diff] [blame] | 2945 | /* |
| 2946 | * If I'm the only one writing to this iclog, sync it to disk. |
| 2947 | * We need to do an atomic compare and decrement here to avoid |
| 2948 | * racing with concurrent atomic_dec_and_lock() calls in |
| 2949 | * xlog_state_release_iclog() when there is more than one |
| 2950 | * reference to the iclog. |
| 2951 | */ |
| 2952 | if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) { |
| 2953 | /* we are the only one */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2954 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 49641f1 | 2008-07-11 17:43:55 +1000 | [diff] [blame] | 2955 | error = xlog_state_release_iclog(log, iclog); |
| 2956 | if (error) |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 2957 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2958 | } else { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2959 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2960 | } |
| 2961 | goto restart; |
| 2962 | } |
| 2963 | |
| 2964 | /* Do we have enough room to write the full amount in the remainder |
| 2965 | * of this iclog? Or must we continue a write on the next iclog and |
| 2966 | * mark this iclog as completely taken? In the case where we switch |
| 2967 | * iclogs (to mark it taken), this particular iclog will release/sync |
| 2968 | * to disk in xlog_write(). |
| 2969 | */ |
| 2970 | if (len <= iclog->ic_size - iclog->ic_offset) { |
| 2971 | *continued_write = 0; |
| 2972 | iclog->ic_offset += len; |
| 2973 | } else { |
| 2974 | *continued_write = 1; |
| 2975 | xlog_state_switch_iclogs(log, iclog, iclog->ic_size); |
| 2976 | } |
| 2977 | *iclogp = iclog; |
| 2978 | |
| 2979 | ASSERT(iclog->ic_offset <= iclog->ic_size); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 2980 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2981 | |
| 2982 | *logoffsetp = log_offset; |
| 2983 | return 0; |
| 2984 | } /* xlog_state_get_iclog_space */ |
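
The continued-write decision at the end of xlog_state_get_iclog_space() reduces to simple arithmetic on the iclog's remaining space. The following is a minimal standalone sketch of that decision with made-up sizes rather than real iclog fields; it is an illustration, not the kernel code itself.

    #include <stdio.h>

    /*
     * Toy model of the "does the record fit?" check made by
     * xlog_state_get_iclog_space().  ic_size/ic_offset below are
     * illustrative values.  Note the real code switches iclogs in the
     * spill case instead of bumping ic_offset; returning ic_size here
     * is a simplification.
     */
    static int fits_in_iclog(int len, int ic_size, int ic_offset,
                             int *continued_write)
    {
            if (len <= ic_size - ic_offset) {
                    *continued_write = 0;   /* whole record fits here */
                    return ic_offset + len; /* new ic_offset */
            }
            *continued_write = 1;           /* record spills into the next iclog */
            return ic_size;                 /* this iclog is completely taken */
    }

    int main(void)
    {
            int cont;
            int off = fits_in_iclog(4096, 32768, 30000, &cont);

            printf("continued_write=%d new offset=%d\n", cont, off);
            return 0;
    }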
| 2985 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2986 | /* The first cnt-1 times through here we don't need to |
| 2987 | * move the grant write head because the permanent |
| 2988 | * reservation has reserved cnt times the unit amount. |
| 2989 | * Release part of current permanent unit reservation and |
| 2990 | * reset the current reservation to be one unit's worth. Also |
| 2991 | * move grant reservation head forward. |
| 2992 | */ |
| 2993 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 2994 | xlog_regrant_reserve_log_space( |
| 2995 | struct xlog *log, |
| 2996 | struct xlog_ticket *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2997 | { |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 2998 | trace_xfs_log_regrant_reserve_enter(log, ticket); |
| 2999 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3000 | if (ticket->t_cnt > 0) |
| 3001 | ticket->t_cnt--; |
| 3002 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3003 | xlog_grant_sub_space(log, &log->l_reserve_head.grant, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3004 | ticket->t_curr_res); |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3005 | xlog_grant_sub_space(log, &log->l_write_head.grant, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3006 | ticket->t_curr_res); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3007 | ticket->t_curr_res = ticket->t_unit_res; |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3008 | xlog_tic_reset_res(ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3009 | |
| 3010 | trace_xfs_log_regrant_reserve_sub(log, ticket); |
| 3011 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3012 | /* just return if we still have some of the pre-reserved space */ |
Dave Chinner | d0eb2f3 | 2010-12-21 12:29:14 +1100 | [diff] [blame] | 3013 | if (ticket->t_cnt > 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3014 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3015 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3016 | xlog_grant_add_space(log, &log->l_reserve_head.grant, |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3017 | ticket->t_unit_res); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3018 | |
| 3019 | trace_xfs_log_regrant_reserve_exit(log, ticket); |
| 3020 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3021 | ticket->t_curr_res = ticket->t_unit_res; |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3022 | xlog_tic_reset_res(ticket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3023 | } /* xlog_regrant_reserve_log_space */ |
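
The grant-head bookkeeping above is easier to follow with concrete numbers. Below is a rough standalone model that mirrors the sequence of operations in xlog_regrant_reserve_log_space(); the byte values are illustrative, and the packed cycle/byte grant heads of the real log are reduced to plain counters.

    #include <stdio.h>

    /*
     * Toy model: the current reservation is subtracted from both grant
     * heads, the ticket's current reservation is reset to one unit, and
     * only once the pre-reserved count has run out is a whole unit added
     * back onto the reserve head.
     */
    struct toy_ticket {
            int t_cnt;
            int t_unit_res;
            int t_curr_res;
    };

    int main(void)
    {
            struct toy_ticket t = { .t_cnt = 1, .t_unit_res = 8192, .t_curr_res = 1000 };
            long reserve_grant = 100000, write_grant = 100000;

            if (t.t_cnt > 0)
                    t.t_cnt--;

            reserve_grant -= t.t_curr_res;
            write_grant -= t.t_curr_res;
            t.t_curr_res = t.t_unit_res;

            if (t.t_cnt == 0) {
                    /* out of pre-reserved units: take a whole new unit */
                    reserve_grant += t.t_unit_res;
            }

            printf("cnt=%d curr_res=%d reserve=%ld write=%ld\n",
                   t.t_cnt, t.t_curr_res, reserve_grant, write_grant);
            return 0;
    }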
| 3024 | |
| 3025 | |
| 3026 | /* |
| 3027 | * Give back the space left from a reservation. |
| 3028 | * |
| 3029 | * All the information we need to make a correct determination of space left |
| 3030 | * is present. For non-permanent reservations, things are quite easy. The |
| 3031 | * count should have been decremented to zero. We only need to deal with the |
| 3032 | * space remaining in the current reservation part of the ticket. If the |
| 3033 | * ticket contains a permanent reservation, there may be left over space which |
| 3034 | * needs to be released. A count of N means that N-1 refills of the current |
| 3035 | * reservation can be done before we need to ask for more space. The first |
| 3036 | * one goes to fill up the first current reservation. Once we run out of |
| 3037 | * space, the count will stay at zero and the only space remaining will be |
| 3038 | * in the current reservation field. |
| 3039 | */ |
| 3040 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3041 | xlog_ungrant_log_space( |
| 3042 | struct xlog *log, |
| 3043 | struct xlog_ticket *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3044 | { |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3045 | int bytes; |
| 3046 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3047 | if (ticket->t_cnt > 0) |
| 3048 | ticket->t_cnt--; |
| 3049 | |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3050 | trace_xfs_log_ungrant_enter(log, ticket); |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3051 | trace_xfs_log_ungrant_sub(log, ticket); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3052 | |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3053 | /* |
| 3054 | * If this is a permanent reservation ticket, we may be able to free |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3055 | * up more space based on the remaining count. |
| 3056 | */ |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3057 | bytes = ticket->t_curr_res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3058 | if (ticket->t_cnt > 0) { |
| 3059 | ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3060 | bytes += ticket->t_unit_res*ticket->t_cnt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3061 | } |
| 3062 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3063 | xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); |
| 3064 | xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); |
Dave Chinner | 663e496 | 2010-12-21 12:06:05 +1100 | [diff] [blame] | 3065 | |
Christoph Hellwig | 0b1b213 | 2009-12-14 23:14:59 +0000 | [diff] [blame] | 3066 | trace_xfs_log_ungrant_exit(log, ticket); |
| 3067 | |
Christoph Hellwig | cfb7cdc | 2012-02-20 02:31:23 +0000 | [diff] [blame] | 3068 | xfs_log_space_wake(log->l_mp); |
Christoph Hellwig | 09a423a | 2012-02-20 02:31:20 +0000 | [diff] [blame] | 3069 | } |
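
The amount handed back by xlog_ungrant_log_space() is just the unused current reservation plus any whole units still outstanding on a permanent ticket. A small worked example with hypothetical numbers:

    #include <stdio.h>

    /* Model of the byte count computed by xlog_ungrant_log_space(). */
    int main(void)
    {
            int t_cnt = 3;                  /* permanent ticket, illustrative */
            int t_unit_res = 8192;
            int t_curr_res = 1500;
            int bytes;

            if (t_cnt > 0)
                    t_cnt--;                /* the current unit is consumed */

            bytes = t_curr_res;
            if (t_cnt > 0)                  /* remaining whole units, if permanent */
                    bytes += t_unit_res * t_cnt;

            printf("bytes returned to both grant heads: %d\n", bytes);
            return 0;
    }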
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3070 | |
| 3071 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3072 | * Flush iclog to disk if this is the last reference to the given iclog and |
| 3073 | * the WANT_SYNC bit is set. |
| 3074 | * |
| 3075 | * When this function is entered, the iclog is not necessarily in the |
| 3076 | * WANT_SYNC state. It may be sitting around waiting to get filled. |
| 3077 | * |
| 3078 | * |
| 3079 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 3080 | STATIC int |
David Chinner | b589334 | 2008-03-06 13:44:06 +1100 | [diff] [blame] | 3081 | xlog_state_release_iclog( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3082 | struct xlog *log, |
| 3083 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3084 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3085 | int sync = 0; /* do we sync? */ |
| 3086 | |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3087 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3088 | return -EIO; |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3089 | |
| 3090 | ASSERT(atomic_read(&iclog->ic_refcnt) > 0); |
| 3091 | if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) |
| 3092 | return 0; |
| 3093 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3094 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3095 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3096 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3097 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3098 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || |
| 3099 | iclog->ic_state == XLOG_STATE_WANT_SYNC); |
| 3100 | |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3101 | if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { |
David Chinner | b589334 | 2008-03-06 13:44:06 +1100 | [diff] [blame] | 3102 | /* update tail before writing to iclog */ |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3103 | xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3104 | sync++; |
| 3105 | iclog->ic_state = XLOG_STATE_SYNCING; |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3106 | iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); |
| 3107 | xlog_verify_tail_lsn(log, iclog, tail_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | /* cycle incremented when incrementing curr_block */ |
| 3109 | } |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3110 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3111 | |
| 3112 | /* |
| 3113 | * We let the log lock go, so it's possible that we hit a log I/O |
Nathan Scott | c41564b | 2006-03-29 08:55:14 +1000 | [diff] [blame] | 3114 | * error or some other SHUTDOWN condition that marks the iclog |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3115 | * as XLOG_STATE_IOERROR before the bwrite. However, we know that |
| 3116 | * this iclog has consistent data, so we ignore IOERROR |
| 3117 | * flags after this point. |
| 3118 | */ |
David Chinner | b589334 | 2008-03-06 13:44:06 +1100 | [diff] [blame] | 3119 | if (sync) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3120 | return xlog_sync(log, iclog); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3121 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3122 | } /* xlog_state_release_iclog */ |
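
The release path above only issues the disk write when two things coincide: this call drops the final reference and the iclog has already been marked WANT_SYNC. The sketch below is a simplified, single-threaded model of that decision, so the atomic_dec_and_lock() pairing is reduced to a plain counter and the tail-LSN stamping is left as a comment.

    #include <stdbool.h>
    #include <stdio.h>

    enum toy_state { TOY_ACTIVE, TOY_WANT_SYNC, TOY_SYNCING };

    struct toy_iclog {
            int refcnt;
            enum toy_state state;
    };

    /* Returns true when this release should trigger the actual xlog_sync(). */
    static bool release_iclog(struct toy_iclog *ic)
    {
            if (--ic->refcnt > 0)
                    return false;           /* other writers still hold references */
            if (ic->state != TOY_WANT_SYNC)
                    return false;           /* still ACTIVE: keep filling it */
            ic->state = TOY_SYNCING;        /* the tail LSN would be stamped here */
            return true;
    }

    int main(void)
    {
            struct toy_iclog ic = { .refcnt = 2, .state = TOY_WANT_SYNC };

            printf("first release syncs: %d\n", release_iclog(&ic));   /* 0 */
            printf("last release syncs:  %d\n", release_iclog(&ic));   /* 1 */
            return 0;
    }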
| 3123 | |
| 3124 | |
| 3125 | /* |
| 3126 | * This routine will mark the current iclog in the ring as WANT_SYNC |
| 3127 | * and move the current iclog pointer to the next iclog in the ring. |
| 3128 | * When this routine is called from xlog_state_get_iclog_space(), the |
| 3129 | * exact size of the iclog has not yet been determined. All we know is |
| 3130 | * that the iclog is full of data. We have run out of space in this log record. |
| 3131 | */ |
| 3132 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3133 | xlog_state_switch_iclogs( |
| 3134 | struct xlog *log, |
| 3135 | struct xlog_in_core *iclog, |
| 3136 | int eventual_size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3137 | { |
| 3138 | ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); |
| 3139 | if (!eventual_size) |
| 3140 | eventual_size = iclog->ic_offset; |
| 3141 | iclog->ic_state = XLOG_STATE_WANT_SYNC; |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3142 | iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3143 | log->l_prev_block = log->l_curr_block; |
| 3144 | log->l_prev_cycle = log->l_curr_cycle; |
| 3145 | |
| 3146 | /* roll log?: ic_offset changed later */ |
| 3147 | log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); |
| 3148 | |
| 3149 | /* Round up to next log-sunit */ |
Eric Sandeen | 6211870 | 2008-03-06 13:44:28 +1100 | [diff] [blame] | 3150 | if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3151 | log->l_mp->m_sb.sb_logsunit > 1) { |
| 3152 | __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); |
| 3153 | log->l_curr_block = roundup(log->l_curr_block, sunit_bb); |
| 3154 | } |
| 3155 | |
| 3156 | if (log->l_curr_block >= log->l_logBBsize) { |
Brian Foster | a45086e | 2015-10-12 15:59:25 +1100 | [diff] [blame] | 3157 | /* |
| 3158 | * Rewind the current block before the cycle is bumped to make |
| 3159 | * sure that the combined LSN never transiently moves forward |
| 3160 | * when the log wraps to the next cycle. This is to support the |
| 3161 | * unlocked sample of these fields from xlog_valid_lsn(). Most |
| 3162 | * other cases should acquire l_icloglock. |
| 3163 | */ |
| 3164 | log->l_curr_block -= log->l_logBBsize; |
| 3165 | ASSERT(log->l_curr_block >= 0); |
| 3166 | smp_wmb(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3167 | log->l_curr_cycle++; |
| 3168 | if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) |
| 3169 | log->l_curr_cycle++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3170 | } |
| 3171 | ASSERT(iclog == log->l_iclog); |
| 3172 | log->l_iclog = iclog->ic_next; |
| 3173 | } /* xlog_state_switch_iclogs */ |
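
The block accounting in xlog_state_switch_iclogs() (advance by the iclog payload plus header, round up to the log stripe unit, then wrap and bump the cycle) can be modelled standalone. BTOBB() is bytes-to-basic-blocks (512-byte units); all the sizes below are illustrative, not taken from a real log.

    #include <stdio.h>

    #define BBSHIFT                 9
    #define BTOBB(bytes)            (((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)
    #define XLOG_HEADER_MAGIC_NUM   0xFEEDbabe  /* cycle value that must be skipped */

    static int roundup_to(int x, int multiple)
    {
            return ((x + multiple - 1) / multiple) * multiple;
    }

    int main(void)
    {
            int curr_block = 20470;
            unsigned int curr_cycle = 7;
            int log_size_bb = 20480;            /* log size in basic blocks */
            int eventual_size = 28 * 1024;      /* bytes written to this iclog */
            int iclog_hsize = 512;              /* one header block */
            int sunit_bb = BTOBB(32 * 1024);    /* 32k log stripe unit */

            curr_block += BTOBB(eventual_size) + BTOBB(iclog_hsize);
            curr_block = roundup_to(curr_block, sunit_bb);

            if (curr_block >= log_size_bb) {
                    curr_block -= log_size_bb;  /* wrap to the start of the log */
                    curr_cycle++;
                    if (curr_cycle == XLOG_HEADER_MAGIC_NUM)
                            curr_cycle++;
            }

            printf("curr_cycle=%u curr_block=%d\n", curr_cycle, curr_block);
            return 0;
    }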
| 3174 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3175 | /* |
| 3176 | * Write out all data in the in-core log as of this exact moment in time. |
| 3177 | * |
| 3178 | * Data may be written to the in-core log during this call. However, |
| 3179 | * we don't guarantee this data will be written out. A change from past |
| 3180 | * implementation means this routine will *not* write out zero length LRs. |
| 3181 | * |
| 3182 | * Basically, we try and perform an intelligent scan of the in-core logs. |
| 3183 | * If we determine there is no flushable data, we just return. There is no |
| 3184 | * flushable data if: |
| 3185 | * |
| 3186 | * 1. the current iclog is active and has no data; the previous iclog |
| 3187 | * is in the active or dirty state. |
| 3188 | * 2. the current iclog is dirty, and the previous iclog is in the |
| 3189 | * active or dirty state. |
| 3190 | * |
David Chinner | 12017fa | 2008-08-13 16:34:31 +1000 | [diff] [blame] | 3191 | * We may sleep if: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3192 | * |
| 3193 | * 1. the current iclog is not in the active nor dirty state. |
| 3194 | * 2. the current iclog is dirty, and the previous iclog is not in the |
| 3195 | * active nor dirty state. |
| 3196 | * 3. the current iclog is active, and there is another thread writing |
| 3197 | * to this particular iclog. |
| 3198 | * 4. a) the current iclog is active and has no other writers |
| 3199 | * b) when we return from flushing out this iclog, it is still |
| 3200 | * not in the active nor dirty state. |
| 3201 | */ |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3202 | int |
| 3203 | _xfs_log_force( |
| 3204 | struct xfs_mount *mp, |
| 3205 | uint flags, |
| 3206 | int *log_flushed) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3207 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3208 | struct xlog *log = mp->m_log; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3209 | struct xlog_in_core *iclog; |
| 3210 | xfs_lsn_t lsn; |
| 3211 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3212 | XFS_STATS_INC(mp, xs_log_force); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3213 | |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 3214 | xlog_cil_force(log); |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 3215 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3216 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3217 | |
| 3218 | iclog = log->l_iclog; |
| 3219 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3220 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3221 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3222 | } |
| 3223 | |
| 3224 | /* If the head iclog is neither active nor dirty, we just attach |
| 3225 | * ourselves to the head and go to sleep. |
| 3226 | */ |
| 3227 | if (iclog->ic_state == XLOG_STATE_ACTIVE || |
| 3228 | iclog->ic_state == XLOG_STATE_DIRTY) { |
| 3229 | /* |
| 3230 | * If the head is dirty or (active and empty), then |
| 3231 | * we need to look at the previous iclog. If the previous |
| 3232 | * iclog is active or dirty we are done. There is nothing |
| 3233 | * to sync out. Otherwise, we attach ourselves to the |
| 3234 | * previous iclog and go to sleep. |
| 3235 | */ |
| 3236 | if (iclog->ic_state == XLOG_STATE_DIRTY || |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3237 | (atomic_read(&iclog->ic_refcnt) == 0 |
| 3238 | && iclog->ic_offset == 0)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3239 | iclog = iclog->ic_prev; |
| 3240 | if (iclog->ic_state == XLOG_STATE_ACTIVE || |
| 3241 | iclog->ic_state == XLOG_STATE_DIRTY) |
| 3242 | goto no_sleep; |
| 3243 | else |
| 3244 | goto maybe_sleep; |
| 3245 | } else { |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3246 | if (atomic_read(&iclog->ic_refcnt) == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3247 | /* We are the only one with access to this |
| 3248 | * iclog. Flush it out now. There should |
| 3249 | * be a roundoff of zero to show that someone |
| 3250 | * has already taken care of the roundoff from |
| 3251 | * the previous sync. |
| 3252 | */ |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3253 | atomic_inc(&iclog->ic_refcnt); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3254 | lsn = be64_to_cpu(iclog->ic_header.h_lsn); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3255 | xlog_state_switch_iclogs(log, iclog, 0); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3256 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3257 | |
| 3258 | if (xlog_state_release_iclog(log, iclog)) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3259 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3260 | |
| 3261 | if (log_flushed) |
| 3262 | *log_flushed = 1; |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3263 | spin_lock(&log->l_icloglock); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3264 | if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3265 | iclog->ic_state != XLOG_STATE_DIRTY) |
| 3266 | goto maybe_sleep; |
| 3267 | else |
| 3268 | goto no_sleep; |
| 3269 | } else { |
| 3270 | /* Someone else is writing to this iclog. |
| 3271 | * Use its call to flush out the data. However, |
| 3272 | * the other thread may not force out this LR, |
| 3273 | * so we mark it WANT_SYNC. |
| 3274 | */ |
| 3275 | xlog_state_switch_iclogs(log, iclog, 0); |
| 3276 | goto maybe_sleep; |
| 3277 | } |
| 3278 | } |
| 3279 | } |
| 3280 | |
| 3281 | /* By the time we come around again, the iclog could've been filled |
| 3282 | * which would give it another lsn. If we have a new lsn, just |
| 3283 | * return because the relevant data has been flushed. |
| 3284 | */ |
| 3285 | maybe_sleep: |
| 3286 | if (flags & XFS_LOG_SYNC) { |
| 3287 | /* |
| 3288 | * We must check if we're shutting down here, before |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3289 | * we wait, while we're holding the l_icloglock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3290 | * Then we check again after waking up, in case our |
| 3291 | * sleep was disturbed by bad news. |
| 3292 | */ |
| 3293 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3294 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3295 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3296 | } |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3297 | XFS_STATS_INC(mp, xs_log_force_sleep); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3298 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3299 | /* |
| 3300 | * No need to grab the log lock here since we're |
| 3301 | * only deciding whether or not to return EIO |
| 3302 | * and the memory read should be atomic. |
| 3303 | */ |
| 3304 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3305 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3306 | if (log_flushed) |
| 3307 | *log_flushed = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3308 | } else { |
| 3309 | |
| 3310 | no_sleep: |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3311 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3312 | } |
| 3313 | return 0; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3314 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3315 | |
| 3316 | /* |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3317 | * Wrapper for _xfs_log_force(), to be used when caller doesn't care |
| 3318 | * about errors or whether the log was flushed or not. This is the normal |
| 3319 | * interface to use when trying to unpin items or move the log forward. |
| 3320 | */ |
| 3321 | void |
| 3322 | xfs_log_force( |
| 3323 | xfs_mount_t *mp, |
| 3324 | uint flags) |
| 3325 | { |
| 3326 | int error; |
| 3327 | |
Carlos Maiolino | 9f27889 | 2016-04-06 09:46:30 +1000 | [diff] [blame] | 3328 | trace_xfs_log_force(mp, 0, _RET_IP_); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3329 | error = _xfs_log_force(mp, flags, NULL); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3330 | if (error) |
| 3331 | xfs_warn(mp, "%s: error %d returned.", __func__, error); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3332 | } |
| 3333 | |
| 3334 | /* |
| 3335 | * Force the in-core log to disk for a specific LSN. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3336 | * |
| 3337 | * Find in-core log with lsn. |
| 3338 | * If it is in the DIRTY state, just return. |
| 3339 | * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC |
| 3340 | * state and go to sleep or return. |
| 3341 | * If it is in any other state, go to sleep or return. |
| 3342 | * |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3343 | * Synchronous forces are implemented with a signal variable. All callers |
| 3344 | * to force a given lsn to disk will wait on the sv attached to the |
| 3345 | * specific in-core log. When the given in-core log finally completes its |
| 3346 | * write to disk, that thread will wake up all threads waiting on the |
| 3347 | * sv. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3348 | */ |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3349 | int |
| 3350 | _xfs_log_force_lsn( |
| 3351 | struct xfs_mount *mp, |
| 3352 | xfs_lsn_t lsn, |
| 3353 | uint flags, |
| 3354 | int *log_flushed) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3355 | { |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3356 | struct xlog *log = mp->m_log; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3357 | struct xlog_in_core *iclog; |
| 3358 | int already_slept = 0; |
| 3359 | |
| 3360 | ASSERT(lsn != 0); |
| 3361 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3362 | XFS_STATS_INC(mp, xs_log_force); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3363 | |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 3364 | lsn = xlog_cil_force_lsn(log, lsn); |
| 3365 | if (lsn == NULLCOMMITLSN) |
| 3366 | return 0; |
Dave Chinner | 71e330b | 2010-05-21 14:37:18 +1000 | [diff] [blame] | 3367 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3368 | try_again: |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3369 | spin_lock(&log->l_icloglock); |
| 3370 | iclog = log->l_iclog; |
| 3371 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3372 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3373 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3374 | } |
| 3375 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3376 | do { |
| 3377 | if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { |
| 3378 | iclog = iclog->ic_next; |
| 3379 | continue; |
| 3380 | } |
| 3381 | |
| 3382 | if (iclog->ic_state == XLOG_STATE_DIRTY) { |
| 3383 | spin_unlock(&log->l_icloglock); |
| 3384 | return 0; |
| 3385 | } |
| 3386 | |
| 3387 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
| 3388 | /* |
| 3389 | * We sleep here if we haven't already slept (e.g. |
| 3390 | * this is the first time we've looked at the correct |
| 3391 | * iclog buf) and the buffer before us is going to |
| 3392 | * be sync'ed. The reason for this is that if we |
| 3393 | * are doing sync transactions here, by waiting for |
| 3394 | * the previous I/O to complete, we can allow a few |
| 3395 | * more transactions into this iclog before we close |
| 3396 | * it down. |
| 3397 | * |
| 3398 | * Otherwise, we mark the buffer WANT_SYNC, and bump |
| 3399 | * up the refcnt so we can release the log (which |
| 3400 | * drops the ref count). The state switch keeps new |
| 3401 | * transaction commits from using this buffer. When |
| 3402 | * the current commits finish writing into the buffer, |
| 3403 | * the refcount will drop to zero and the buffer will |
| 3404 | * go out then. |
| 3405 | */ |
| 3406 | if (!already_slept && |
| 3407 | (iclog->ic_prev->ic_state & |
| 3408 | (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) { |
| 3409 | ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR)); |
| 3410 | |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3411 | XFS_STATS_INC(mp, xs_log_force_sleep); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3412 | |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3413 | xlog_wait(&iclog->ic_prev->ic_write_wait, |
| 3414 | &log->l_icloglock); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3415 | if (log_flushed) |
| 3416 | *log_flushed = 1; |
| 3417 | already_slept = 1; |
| 3418 | goto try_again; |
| 3419 | } |
David Chinner | 155cc6b | 2008-03-06 13:44:14 +1100 | [diff] [blame] | 3420 | atomic_inc(&iclog->ic_refcnt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3421 | xlog_state_switch_iclogs(log, iclog, 0); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3422 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3423 | if (xlog_state_release_iclog(log, iclog)) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3424 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3425 | if (log_flushed) |
| 3426 | *log_flushed = 1; |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3427 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3428 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3429 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3430 | if ((flags & XFS_LOG_SYNC) && /* sleep */ |
| 3431 | !(iclog->ic_state & |
| 3432 | (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) { |
| 3433 | /* |
| 3434 | * Don't wait on completion if we know that we've |
| 3435 | * gotten a log write error. |
| 3436 | */ |
| 3437 | if (iclog->ic_state & XLOG_STATE_IOERROR) { |
| 3438 | spin_unlock(&log->l_icloglock); |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3439 | return -EIO; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3440 | } |
Bill O'Donnell | ff6d6af | 2015-10-12 18:21:22 +1100 | [diff] [blame] | 3441 | XFS_STATS_INC(mp, xs_log_force_sleep); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3442 | xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3443 | /* |
| 3444 | * No need to grab the log lock here since we're |
| 3445 | * only deciding whether or not to return EIO |
| 3446 | * and the memory read should be atomic. |
| 3447 | */ |
| 3448 | if (iclog->ic_state & XLOG_STATE_IOERROR) |
Dave Chinner | 2451337 | 2014-06-25 14:58:08 +1000 | [diff] [blame] | 3449 | return -EIO; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3450 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3451 | if (log_flushed) |
| 3452 | *log_flushed = 1; |
| 3453 | } else { /* just return */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3454 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3455 | } |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3456 | |
| 3457 | return 0; |
| 3458 | } while (iclog != log->l_iclog); |
| 3459 | |
| 3460 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3461 | return 0; |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3462 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3463 | |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3464 | /* |
| 3465 | * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care |
| 3466 | * about errors or whether the log was flushed or not. This is the normal |
| 3467 | * interface to use when trying to unpin items or move the log forward. |
| 3468 | */ |
| 3469 | void |
| 3470 | xfs_log_force_lsn( |
| 3471 | xfs_mount_t *mp, |
| 3472 | xfs_lsn_t lsn, |
| 3473 | uint flags) |
| 3474 | { |
| 3475 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3476 | |
Carlos Maiolino | 9f27889 | 2016-04-06 09:46:30 +1000 | [diff] [blame] | 3477 | trace_xfs_log_force(mp, lsn, _RET_IP_); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3478 | error = _xfs_log_force_lsn(mp, lsn, flags, NULL); |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3479 | if (error) |
| 3480 | xfs_warn(mp, "%s: error %d returned.", __func__, error); |
Christoph Hellwig | a14a348 | 2010-01-19 09:56:46 +0000 | [diff] [blame] | 3481 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3482 | |
| 3483 | /* |
| 3484 | * Called when we want to mark the current iclog as being ready to sync to |
| 3485 | * disk. |
| 3486 | */ |
David Chinner | a8272ce | 2007-11-23 16:28:09 +1100 | [diff] [blame] | 3487 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3488 | xlog_state_want_sync( |
| 3489 | struct xlog *log, |
| 3490 | struct xlog_in_core *iclog) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3491 | { |
Christoph Hellwig | a8914f3 | 2009-08-10 11:32:44 -0300 | [diff] [blame] | 3492 | assert_spin_locked(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3493 | |
| 3494 | if (iclog->ic_state == XLOG_STATE_ACTIVE) { |
| 3495 | xlog_state_switch_iclogs(log, iclog, 0); |
| 3496 | } else { |
| 3497 | ASSERT(iclog->ic_state & |
| 3498 | (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); |
| 3499 | } |
Christoph Hellwig | 39e2def | 2008-12-03 12:20:28 +0100 | [diff] [blame] | 3500 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3501 | |
| 3502 | |
| 3503 | /***************************************************************************** |
| 3504 | * |
| 3505 | * TICKET functions |
| 3506 | * |
| 3507 | ***************************************************************************** |
| 3508 | */ |
| 3509 | |
| 3510 | /* |
Malcolm Parsons | 9da096f | 2009-03-29 09:55:42 +0200 | [diff] [blame] | 3511 | * Free a used ticket when its refcount falls to zero. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3512 | */ |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3513 | void |
| 3514 | xfs_log_ticket_put( |
| 3515 | xlog_ticket_t *ticket) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3516 | { |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3517 | ASSERT(atomic_read(&ticket->t_ref) > 0); |
Dave Chinner | eb40a87 | 2010-12-21 12:09:01 +1100 | [diff] [blame] | 3518 | if (atomic_dec_and_test(&ticket->t_ref)) |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3519 | kmem_zone_free(xfs_log_ticket_zone, ticket); |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3520 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3521 | |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3522 | xlog_ticket_t * |
| 3523 | xfs_log_ticket_get( |
| 3524 | xlog_ticket_t *ticket) |
| 3525 | { |
| 3526 | ASSERT(atomic_read(&ticket->t_ref) > 0); |
| 3527 | atomic_inc(&ticket->t_ref); |
| 3528 | return ticket; |
| 3529 | } |
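
xfs_log_ticket_get()/xfs_log_ticket_put() are a plain reference count: the last put frees the ticket back to its zone. Below is a standalone sketch of that lifetime pattern using C11 atomics, with generic malloc/free standing in for the kmem zone allocator; it models the pattern only and is not the kernel implementation.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct xlog_ticket's t_ref handling. */
    struct toy_ticket {
            atomic_int t_ref;
    };

    static struct toy_ticket *ticket_alloc(void)
    {
            struct toy_ticket *t = calloc(1, sizeof(*t));
            atomic_store(&t->t_ref, 1);         /* creator holds one reference */
            return t;
    }

    static struct toy_ticket *ticket_get(struct toy_ticket *t)
    {
            atomic_fetch_add(&t->t_ref, 1);
            return t;
    }

    static void ticket_put(struct toy_ticket *t)
    {
            if (atomic_fetch_sub(&t->t_ref, 1) == 1) {
                    free(t);                    /* last reference: release it */
                    printf("ticket freed\n");
            }
    }

    int main(void)
    {
            struct toy_ticket *t = ticket_alloc();

            ticket_get(t);  /* e.g. another holder takes its own reference */
            ticket_put(t);  /* ...and drops it when it is done */
            ticket_put(t);  /* the original owner's put frees the ticket */
            return 0;
    }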
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3530 | |
| 3531 | /* |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3532 | * Figure out the total log space unit (in bytes) that would be |
| 3533 | * required for a log ticket. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3534 | */ |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3535 | int |
| 3536 | xfs_log_calc_unit_res( |
| 3537 | struct xfs_mount *mp, |
| 3538 | int unit_bytes) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3539 | { |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3540 | struct xlog *log = mp->m_log; |
| 3541 | int iclog_space; |
| 3542 | uint num_headers; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3543 | |
| 3544 | /* |
| 3545 | * Permanent reservations have up to 'cnt'-1 active log operations |
| 3546 | * in the log. A unit in this case is the amount of space for one |
| 3547 | * of these log operations. Normal reservations have a cnt of 1 |
| 3548 | * and their unit amount is the total amount of space required. |
| 3549 | * |
| 3550 | * The following lines of code account for non-transaction data |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3551 | * which occupy space in the on-disk log. |
| 3552 | * |
| 3553 | * Normal form of a transaction is: |
| 3554 | * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph> |
| 3555 | * and then there are LR hdrs, split-recs and roundoff at end of syncs. |
| 3556 | * |
| 3557 | * We need to account for all the leadup data and trailer data |
| 3558 | * around the transaction data. |
| 3559 | * And then we need to account for the worst case in terms of using |
| 3560 | * more space. |
| 3561 | * The worst case will happen if: |
| 3562 | * - the placement of the transaction happens to be such that the |
| 3563 | * roundoff is at its maximum |
| 3564 | * - the transaction data is synced before the commit record is synced |
| 3565 | * i.e. <transaction-data><roundoff> | <commit-rec><roundoff> |
| 3566 | * Therefore the commit record is in its own Log Record. |
| 3567 | * This can happen as the commit record is called with its |
| 3568 | * own region to xlog_write(). |
| 3569 | * This then means that in the worst case, roundoff can happen for |
| 3570 | * the commit-rec as well. |
| 3571 | * The commit-rec is smaller than padding in this scenario and so it is |
| 3572 | * not added separately. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3573 | */ |
| 3574 | |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3575 | /* for trans header */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3576 | unit_bytes += sizeof(xlog_op_header_t); |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3577 | unit_bytes += sizeof(xfs_trans_header_t); |
| 3578 | |
| 3579 | /* for start-rec */ |
| 3580 | unit_bytes += sizeof(xlog_op_header_t); |
| 3581 | |
Dave Chinner | 9b9fc2b7 | 2010-03-23 11:21:11 +1100 | [diff] [blame] | 3582 | /* |
| 3583 | * for LR headers - the space for data in an iclog is the size minus |
| 3584 | * the space used for the headers. If we use the iclog size, then we |
| 3585 | * undercalculate the number of headers required. |
| 3586 | * |
| 3587 | * Furthermore - the addition of op headers for split-recs might |
| 3588 | * increase the space required enough to require more log and op |
| 3589 | * headers, so take that into account too. |
| 3590 | * |
| 3591 | * IMPORTANT: This reservation makes the assumption that if this |
| 3592 | * transaction is the first in an iclog and hence has the LR headers |
| 3593 | * accounted to it, then the remaining space in the iclog is |
| 3594 | * exclusively for this transaction. i.e. if the transaction is larger |
| 3595 | * than the iclog, it will be the only thing in that iclog. |
| 3596 | * Fundamentally, this means we must pass the entire log vector to |
| 3597 | * xlog_write to guarantee this. |
| 3598 | */ |
| 3599 | iclog_space = log->l_iclog_size - log->l_iclog_hsize; |
| 3600 | num_headers = howmany(unit_bytes, iclog_space); |
| 3601 | |
| 3602 | /* for split-recs - ophdrs added when data split over LRs */ |
| 3603 | unit_bytes += sizeof(xlog_op_header_t) * num_headers; |
| 3604 | |
| 3605 | /* add extra header reservations if we overrun */ |
| 3606 | while (!num_headers || |
| 3607 | howmany(unit_bytes, iclog_space) > num_headers) { |
| 3608 | unit_bytes += sizeof(xlog_op_header_t); |
| 3609 | num_headers++; |
| 3610 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3611 | unit_bytes += log->l_iclog_hsize * num_headers; |
| 3612 | |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3613 | /* for commit-rec LR header - note: padding will subsume the ophdr */ |
| 3614 | unit_bytes += log->l_iclog_hsize; |
| 3615 | |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3616 | /* for roundoff padding for transaction data and one for commit record */ |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3617 | if (xfs_sb_version_haslogv2(&mp->m_sb) && mp->m_sb.sb_logsunit > 1) { |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3618 | /* log su roundoff */ |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3619 | unit_bytes += 2 * mp->m_sb.sb_logsunit; |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3620 | } else { |
| 3621 | /* BB roundoff */ |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3622 | unit_bytes += 2 * BBSIZE; |
Tim Shimmin | 32fb9b5 | 2005-09-02 16:41:43 +1000 | [diff] [blame] | 3623 | } |
| 3624 | |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3625 | return unit_bytes; |
| 3626 | } |
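
The reservation arithmetic above can be followed with concrete numbers. The standalone model below walks the same steps in the same order; the op-header and transaction-header sizes are illustrative placeholders, not the real struct sizes.

    #include <stdio.h>

    #define BBSIZE          512

    /* howmany() as used in the kernel source. */
    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    int main(void)
    {
            /* Illustrative values only. */
            int op_hdr = 12;            /* assumed xlog_op_header_t size */
            int trans_hdr = 16;         /* assumed xfs_trans_header_t size */
            int iclog_size = 32 * 1024;
            int iclog_hsize = 512;
            int logsunit = 0;           /* no v2 stripe unit in this example */
            int unit_bytes = 4000;      /* caller-supplied transaction estimate */

            int iclog_space, num_headers;

            unit_bytes += op_hdr + trans_hdr;   /* transaction header */
            unit_bytes += op_hdr;               /* start record */

            iclog_space = iclog_size - iclog_hsize;
            num_headers = howmany(unit_bytes, iclog_space);
            unit_bytes += op_hdr * num_headers; /* split-record op headers */

            while (!num_headers || howmany(unit_bytes, iclog_space) > num_headers) {
                    unit_bytes += op_hdr;       /* overran: one more op header */
                    num_headers++;
            }
            unit_bytes += iclog_hsize * num_headers;    /* LR headers */
            unit_bytes += iclog_hsize;                  /* commit-record LR header */

            if (logsunit > 1)
                    unit_bytes += 2 * logsunit;         /* stripe-unit roundoff */
            else
                    unit_bytes += 2 * BBSIZE;           /* basic-block roundoff */

            printf("unit reservation: %d bytes\n", unit_bytes);
            return 0;
    }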
| 3627 | |
| 3628 | /* |
| 3629 | * Allocate and initialise a new log ticket. |
| 3630 | */ |
| 3631 | struct xlog_ticket * |
| 3632 | xlog_ticket_alloc( |
| 3633 | struct xlog *log, |
| 3634 | int unit_bytes, |
| 3635 | int cnt, |
| 3636 | char client, |
| 3637 | bool permanent, |
| 3638 | xfs_km_flags_t alloc_flags) |
| 3639 | { |
| 3640 | struct xlog_ticket *tic; |
| 3641 | int unit_res; |
| 3642 | |
| 3643 | tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags); |
| 3644 | if (!tic) |
| 3645 | return NULL; |
| 3646 | |
| 3647 | unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); |
| 3648 | |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3649 | atomic_set(&tic->t_ref, 1); |
Christoph Hellwig | 14a7235f | 2012-02-20 02:31:24 +0000 | [diff] [blame] | 3650 | tic->t_task = current; |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 3651 | INIT_LIST_HEAD(&tic->t_queue); |
Jie Liu | e773fc9 | 2013-08-12 20:50:01 +1000 | [diff] [blame] | 3652 | tic->t_unit_res = unit_res; |
| 3653 | tic->t_curr_res = unit_res; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3654 | tic->t_cnt = cnt; |
| 3655 | tic->t_ocnt = cnt; |
Akinobu Mita | ecb3403 | 2013-03-04 21:58:20 +0900 | [diff] [blame] | 3656 | tic->t_tid = prandom_u32(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3657 | tic->t_clientid = client; |
| 3658 | tic->t_flags = XLOG_TIC_INITED; |
Christoph Hellwig | 9006fb9 | 2012-02-20 02:31:31 +0000 | [diff] [blame] | 3659 | if (permanent) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3660 | tic->t_flags |= XLOG_TIC_PERM_RESERV; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3661 | |
Christoph Hellwig | 0adba53 | 2007-08-30 17:21:46 +1000 | [diff] [blame] | 3662 | xlog_tic_reset_res(tic); |
Tim Shimmin | 7e9c639 | 2005-09-02 16:42:05 +1000 | [diff] [blame] | 3663 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3664 | return tic; |
Dave Chinner | cc09c0d | 2008-11-17 17:37:10 +1100 | [diff] [blame] | 3665 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3666 | |
| 3667 | |
| 3668 | /****************************************************************************** |
| 3669 | * |
| 3670 | * Log debug routines |
| 3671 | * |
| 3672 | ****************************************************************************** |
| 3673 | */ |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 3674 | #if defined(DEBUG) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3675 | /* |
| 3676 | * Make sure that the destination ptr is within the valid data region of |
| 3677 | * one of the iclogs. This uses backup pointers stored in a different |
| 3678 | * part of the log in case we trash the log structure. |
| 3679 | */ |
| 3680 | void |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3681 | xlog_verify_dest_ptr( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3682 | struct xlog *log, |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3683 | void *ptr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3684 | { |
| 3685 | int i; |
| 3686 | int good_ptr = 0; |
| 3687 | |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3688 | for (i = 0; i < log->l_iclog_bufs; i++) { |
| 3689 | if (ptr >= log->l_iclog_bak[i] && |
| 3690 | ptr <= log->l_iclog_bak[i] + log->l_iclog_size) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3691 | good_ptr++; |
| 3692 | } |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3693 | |
| 3694 | if (!good_ptr) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3695 | xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); |
Christoph Hellwig | e6b1f27 | 2010-03-23 11:47:38 +1100 | [diff] [blame] | 3696 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3697 | |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3698 | /* |
| 3699 | * Check to make sure the grant write head didn't just overlap the tail. If |
| 3700 | * the cycles are the same, we can't be overlapping. Otherwise, make sure that |
| 3701 | * the cycles differ by exactly one and check the byte count. |
| 3702 | * |
| 3703 | * This check is run unlocked, so can give false positives. Rather than assert |
| 3704 | * on failures, use a warn-once flag and a panic tag to allow the admin to |
| 3705 | * determine if they want to panic the machine when such an error occurs. For |
| 3706 | * debug kernels this will have the same effect as using an assert but, unlike |
| 3707 | * an assert, it can be turned off at runtime. |
| 3708 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3709 | STATIC void |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3710 | xlog_verify_grant_tail( |
Mark Tinguely | ad223e6 | 2012-06-14 09:22:15 -0500 | [diff] [blame] | 3711 | struct xlog *log) |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3712 | { |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3713 | int tail_cycle, tail_blocks; |
Dave Chinner | a69ed03 | 2010-12-21 12:08:20 +1100 | [diff] [blame] | 3714 | int cycle, space; |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3715 | |
Christoph Hellwig | 2849696 | 2012-02-20 02:31:25 +0000 | [diff] [blame] | 3716 | xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); |
Dave Chinner | 1c3cb9e | 2010-12-21 12:28:39 +1100 | [diff] [blame] | 3717 | xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); |
| 3718 | if (tail_cycle != cycle) { |
Dave Chinner | da8a1a4 | 2011-04-08 12:45:07 +1000 | [diff] [blame] | 3719 | if (cycle - 1 != tail_cycle && |
| 3720 | !(log->l_flags & XLOG_TAIL_WARN)) { |
| 3721 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, |
| 3722 | "%s: cycle - 1 != tail_cycle", __func__); |
| 3723 | log->l_flags |= XLOG_TAIL_WARN; |
| 3724 | } |
| 3725 | |
| 3726 | if (space > BBTOB(tail_blocks) && |
| 3727 | !(log->l_flags & XLOG_TAIL_WARN)) { |
| 3728 | xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, |
| 3729 | "%s: space > BBTOB(tail_blocks)", __func__); |
| 3730 | log->l_flags |= XLOG_TAIL_WARN; |
| 3731 | } |
Dave Chinner | 3f336c6 | 2010-12-21 12:02:52 +1100 | [diff] [blame] | 3732 | } |
| 3733 | } |
| 3734 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3735 | /* check if it will fit */ |
| 3736 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3737 | xlog_verify_tail_lsn( |
| 3738 | struct xlog *log, |
| 3739 | struct xlog_in_core *iclog, |
| 3740 | xfs_lsn_t tail_lsn) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3741 | { |
| 3742 | int blocks; |
| 3743 | |
| 3744 | if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { |
| 3745 | blocks = |
| 3746 | log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); |
| 3747 | if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3748 | xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3749 | } else { |
| 3750 | ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); |
| 3751 | |
| 3752 | if (BLOCK_LSN(tail_lsn) == log->l_prev_block) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3753 | xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3754 | |
| 3755 | blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; |
| 3756 | if (blocks < BTOBB(iclog->ic_offset) + 1) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3757 | xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3758 | } |
| 3759 | } /* xlog_verify_tail_lsn */ |
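
The free-space check in xlog_verify_tail_lsn() hinges on whether the tail and the previous head sit in the same cycle. The standalone model below keeps only that two-case arithmetic and uses a single simplified "does the next iclog fit" threshold; the cycle-in-the-high-word LSN packing and all the sizes are assumptions for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* LSN helpers modelled on xlog_assign_lsn()/CYCLE_LSN()/BLOCK_LSN(). */
    static uint64_t assign_lsn(uint32_t cycle, uint32_t block)
    {
            return ((uint64_t)cycle << 32) | block;
    }
    #define CYCLE_LSN(lsn)  ((uint32_t)((lsn) >> 32))
    #define BLOCK_LSN(lsn)  ((uint32_t)(lsn))

    int main(void)
    {
            uint32_t log_bb = 20480, prev_block = 15000, prev_cycle = 9;
            uint64_t tail_lsn = assign_lsn(9, 14000);   /* illustrative tail */
            uint32_t iclog_bb = 64;                     /* iclog size in basic blocks */
            int64_t blocks;

            if (CYCLE_LSN(tail_lsn) == prev_cycle) {
                    /* same cycle: free space wraps around the end of the log */
                    blocks = (int64_t)log_bb - (prev_block - BLOCK_LSN(tail_lsn));
            } else {
                    /* tail is a cycle behind: free space is the gap up to the tail */
                    blocks = (int64_t)BLOCK_LSN(tail_lsn) - prev_block;
            }

            if (blocks < iclog_bb + 1)
                    printf("would overwrite the tail: ran out of log space\n");
            else
                    printf("ok: %lld free blocks ahead of the head\n",
                           (long long)blocks);
            return 0;
    }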
| 3760 | |
| 3761 | /* |
| 3762 | * Perform a number of checks on the iclog before writing to disk. |
| 3763 | * |
| 3764 | * 1. Make sure the iclogs are still circular |
| 3765 | * 2. Make sure we have a good magic number |
| 3766 | * 3. Make sure we don't have magic numbers in the data |
| 3767 | * 4. Check fields of each log operation header for: |
| 3768 | * A. Valid client identifier |
| 3769 | * B. tid ptr value falls in valid ptr space (user space code) |
| 3770 | * C. Length in log record header is correct according to the |
| 3771 | * individual operation headers within record. |
| 3772 | * 5. When a bwrite will occur within 5 blocks of the front of the physical |
| 3773 | * log, check the preceding blocks of the physical log to make sure all |
| 3774 | * the cycle numbers agree with the current cycle number. |
| 3775 | */ |
| 3776 | STATIC void |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3777 | xlog_verify_iclog( |
| 3778 | struct xlog *log, |
| 3779 | struct xlog_in_core *iclog, |
| 3780 | int count, |
Thiago Farina | 667a929 | 2012-11-12 21:32:59 -0200 | [diff] [blame] | 3781 | bool syncing) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3782 | { |
| 3783 | xlog_op_header_t *ophead; |
| 3784 | xlog_in_core_t *icptr; |
| 3785 | xlog_in_core_2_t *xhdr; |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3786 | void *base_ptr, *ptr, *p; |
Christoph Hellwig | db9d67d | 2015-06-22 09:43:32 +1000 | [diff] [blame] | 3787 | ptrdiff_t field_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3788 | __uint8_t clientid; |
| 3789 | int len, i, j, k, op_len; |
| 3790 | int idx; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3791 | |
| 3792 | /* check validity of iclog pointers */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3793 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3794 | icptr = log->l_iclog; |
Geyslan G. Bem | 643f7c4 | 2013-10-30 16:01:00 -0500 | [diff] [blame] | 3795 | for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) |
| 3796 | ASSERT(icptr); |
| 3797 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3798 | if (icptr != log->l_iclog) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3799 | xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3800 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3801 | |
| 3802 | /* check log magic numbers */ |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 3803 | if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3804 | xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3805 | |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3806 | base_ptr = ptr = &iclog->ic_header; |
| 3807 | p = &iclog->ic_header; |
| 3808 | for (ptr += BBSIZE; ptr < base_ptr + count; ptr += BBSIZE) { |
Christoph Hellwig | 69ef921 | 2011-07-08 14:36:05 +0200 | [diff] [blame] | 3809 | if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3810 | xfs_emerg(log->l_mp, "%s: unexpected magic num", |
| 3811 | __func__); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3812 | } |
| 3813 | |
| 3814 | /* check fields */ |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3815 | len = be32_to_cpu(iclog->ic_header.h_num_logops); |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3816 | base_ptr = ptr = iclog->ic_datap; |
| 3817 | ophead = ptr; |
Christoph Hellwig | b28708d | 2008-11-28 14:23:38 +1100 | [diff] [blame] | 3818 | xhdr = iclog->ic_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3819 | for (i = 0; i < len; i++) { |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3820 | ophead = ptr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3821 | |
| 3822 | /* clientid is only 1 byte */ |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3823 | p = &ophead->oh_clientid; |
| 3824 | field_offset = p - base_ptr; |
Thiago Farina | 667a929 | 2012-11-12 21:32:59 -0200 | [diff] [blame] | 3825 | if (!syncing || (field_offset & 0x1ff)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3826 | clientid = ophead->oh_clientid; |
| 3827 | } else { |
Christoph Hellwig | b2a922c | 2015-06-22 09:45:10 +1000 | [diff] [blame] | 3828 | idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3829 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
| 3830 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 3831 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 3832 | clientid = xlog_get_client_id( |
| 3833 | xhdr[j].hic_xheader.xh_cycle_data[k]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3834 | } else { |
Christoph Hellwig | 03bea6f | 2007-10-12 10:58:05 +1000 | [diff] [blame] | 3835 | clientid = xlog_get_client_id( |
| 3836 | iclog->ic_header.h_cycle_data[idx]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3837 | } |
| 3838 | } |
| 3839 | if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) |
Dave Chinner | a0fa2b6 | 2011-03-07 10:01:35 +1100 | [diff] [blame] | 3840 | xfs_warn(log->l_mp, |
| 3841 | "%s: invalid clientid %d op 0x%p offset 0x%lx", |
| 3842 | __func__, clientid, ophead, |
| 3843 | (unsigned long)field_offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3844 | |
| 3845 | /* check length */ |
Christoph Hellwig | 5809d5e | 2015-06-22 09:44:47 +1000 | [diff] [blame] | 3846 | p = &ophead->oh_len; |
| 3847 | field_offset = p - base_ptr; |
Thiago Farina | 667a929 | 2012-11-12 21:32:59 -0200 | [diff] [blame] | 3848 | if (!syncing || (field_offset & 0x1ff)) { |
Christoph Hellwig | 67fcb7b | 2007-10-12 10:58:59 +1000 | [diff] [blame] | 3849 | op_len = be32_to_cpu(ophead->oh_len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3850 | } else { |
Christoph Hellwig | db9d67d | 2015-06-22 09:43:32 +1000 | [diff] [blame] | 3851 | idx = BTOBBT((uintptr_t)&ophead->oh_len - |
| 3852 | (uintptr_t)iclog->ic_datap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3853 | if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { |
| 3854 | j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
| 3855 | k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3856 | op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3857 | } else { |
Christoph Hellwig | b53e675 | 2007-10-12 10:59:34 +1000 | [diff] [blame] | 3858 | op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3859 | } |
| 3860 | } |
| 3861 | ptr += sizeof(xlog_op_header_t) + op_len; |
| 3862 | } |
| 3863 | } /* xlog_verify_iclog */ |
Nathan Scott | cfcbbbd | 2005-11-02 15:12:04 +1100 | [diff] [blame] | 3864 | #endif |
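/*
 * Illustrative sketch (not part of the original source): the verifier above
 * has to cope with the fact that, while an iclog is being synced, the first
 * 32 bits of every 512-byte sector of the payload are overwritten with the
 * cycle number.  The original words are stashed in h_cycle_data[] of the
 * record header (XLOG_HEADER_CYCLE_SIZE / BBSIZE = 64 slots) and, for larger
 * iclogs, in the extended headers that follow it.  The helper below is
 * hypothetical and only mirrors the j/k index arithmetic used in
 * xlog_verify_iclog(); the constants stand in for BBSIZE (512) and
 * XLOG_HEADER_CYCLE_SIZE (32768).
 */
static inline void
xlog_sketch_cycle_slot(
	unsigned int	data_offset,	/* byte offset into the iclog data */
	unsigned int	*hdr,		/* 0 = main header, >0 = extended hdr */
	unsigned int	*slot)		/* index into the cycle data array */
{
	unsigned int	sector = data_offset / 512;
	unsigned int	slots_per_hdr = 32768 / 512;

	if (sector < slots_per_hdr) {
		*hdr = 0;		/* ic_header.h_cycle_data[sector] */
		*slot = sector;
	} else {
		*hdr = sector / slots_per_hdr;	/* "j" above: xhdr[j] */
		*slot = sector % slots_per_hdr;	/* "k" above: xh_cycle_data[k] */
	}
}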
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3865 | |
| 3866 | /* |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3867 | * Mark all iclogs IOERROR. l_icloglock is held by the caller. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3868 | */ |
| 3869 | STATIC int |
| 3870 | xlog_state_ioerror( |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3871 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3872 | { |
| 3873 | xlog_in_core_t *iclog, *ic; |
| 3874 | |
| 3875 | iclog = log->l_iclog; |
| 3876 | 	if (!(iclog->ic_state & XLOG_STATE_IOERROR)) { |
| 3877 | /* |
| 3878 | * Mark all the incore logs IOERROR. |
| 3879 | * From now on, no log flushes will result. |
| 3880 | */ |
| 3881 | ic = iclog; |
| 3882 | do { |
| 3883 | ic->ic_state = XLOG_STATE_IOERROR; |
| 3884 | ic = ic->ic_next; |
| 3885 | } while (ic != iclog); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3886 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3887 | } |
| 3888 | /* |
| 3889 | 	 * Return non-zero if the state transition has already happened. |
| 3890 | */ |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3891 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3892 | } |
| 3893 | |
| 3894 | /* |
| 3895 | * This is called from xfs_force_shutdown, when we're forcibly |
| 3896 | * shutting down the filesystem, typically because of an IO error. |
| 3897 | * Our main objectives here are to make sure that: |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3898 | * a. if !logerror, flush the logs to disk. Anything modified |
| 3899 | * after this is ignored. |
| 3900 | * b. the filesystem gets marked 'SHUTDOWN' for all interested |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3901 | * parties to find out, 'atomically'. |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3902 | * c. those who're sleeping on log reservations, pinned objects and |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3903 |  *	other resources get woken up and told the bad news. |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3904 | * d. nothing new gets queued up after (b) and (c) are done. |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3905 | * |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3906 | * Note: for the !logerror case we need to flush the regions held in memory out |
| 3907 | * to disk first. This needs to be done before the log is marked as shutdown, |
| 3908 | * otherwise the iclog writes will fail. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3909 | */ |
| 3910 | int |
| 3911 | xfs_log_force_umount( |
| 3912 | struct xfs_mount *mp, |
| 3913 | int logerror) |
| 3914 | { |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 3915 | struct xlog *log; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3916 | int retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3917 | |
| 3918 | log = mp->m_log; |
| 3919 | |
| 3920 | /* |
| 3921 | * If this happens during log recovery, don't worry about |
| 3922 | * locking; the log isn't open for business yet. |
| 3923 | */ |
| 3924 | if (!log || |
| 3925 | log->l_flags & XLOG_ACTIVE_RECOVERY) { |
| 3926 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; |
Christoph Hellwig | bac8dca | 2008-11-28 14:23:31 +1100 | [diff] [blame] | 3927 | if (mp->m_sb_bp) |
Dave Chinner | b0388bf | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 3928 | mp->m_sb_bp->b_flags |= XBF_DONE; |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3929 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3930 | } |
| 3931 | |
| 3932 | /* |
| 3933 | * Somebody could've already done the hard work for us. |
| 3934 | * No need to get locks for this. |
| 3935 | */ |
| 3936 | if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { |
| 3937 | ASSERT(XLOG_FORCED_SHUTDOWN(log)); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 3938 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3939 | } |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3940 | |
| 3941 | /* |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3942 | 	 * Flush all the completed transactions to disk before marking the log |
| 3943 | 	 * as being shut down. We need to do this first to ensure that |
| 3944 | 	 * completed operations are safely on disk before we shut down, and |
| 3945 | 	 * so that we never have to issue any buffer IO once the shutdown |
| 3946 | 	 * flags are set. |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3947 | */ |
Christoph Hellwig | 93b8a58 | 2011-12-06 21:58:07 +0000 | [diff] [blame] | 3948 | if (!logerror) |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3949 | _xfs_log_force(mp, XFS_LOG_SYNC, NULL); |
Dave Chinner | 9da1ab1 | 2010-05-17 15:51:59 +1000 | [diff] [blame] | 3950 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3951 | /* |
Dave Chinner | 3f16b98 | 2010-12-21 12:29:01 +1100 | [diff] [blame] | 3952 | 	 * Mark the filesystem and the log as being in a shutdown state and |
| 3953 | 	 * wake everybody up to tell them the bad news. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3954 | */ |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3955 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3956 | mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; |
Christoph Hellwig | bac8dca | 2008-11-28 14:23:31 +1100 | [diff] [blame] | 3957 | if (mp->m_sb_bp) |
Dave Chinner | b0388bf | 2016-02-10 15:01:11 +1100 | [diff] [blame] | 3958 | mp->m_sb_bp->b_flags |= XBF_DONE; |
Christoph Hellwig | bac8dca | 2008-11-28 14:23:31 +1100 | [diff] [blame] | 3959 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3960 | /* |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3961 | * Mark the log and the iclogs with IO error flags to prevent any |
| 3962 | * further log IO from being issued or completed. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3963 | */ |
| 3964 | log->l_flags |= XLOG_IO_ERROR; |
Dave Chinner | a870fe6d | 2014-10-02 09:02:28 +1000 | [diff] [blame] | 3965 | retval = xlog_state_ioerror(log); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3966 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3967 | |
| 3968 | /* |
Dave Chinner | 1054794 | 2010-12-21 12:02:25 +1100 | [diff] [blame] | 3969 | * We don't want anybody waiting for log reservations after this. That |
| 3970 | * means we have to wake up everybody queued up on reserveq as well as |
| 3971 | * writeq. In addition, we make sure in xlog_{re}grant_log_space that |
| 3972 | * we don't enqueue anything once the SHUTDOWN flag is set, and this |
Dave Chinner | 3f16b98 | 2010-12-21 12:29:01 +1100 | [diff] [blame] | 3973 | * action is protected by the grant locks. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3974 | */ |
Christoph Hellwig | a79bf2d | 2012-02-20 02:31:27 +0000 | [diff] [blame] | 3975 | xlog_grant_head_wake_all(&log->l_reserve_head); |
| 3976 | xlog_grant_head_wake_all(&log->l_write_head); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3977 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3978 | /* |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 3979 | * Wake up everybody waiting on xfs_log_force. Wake the CIL push first |
| 3980 | * as if the log writes were completed. The abort handling in the log |
| 3981 | * item committed callback functions will do this again under lock to |
| 3982 | * avoid races. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3983 | */ |
Dave Chinner | ac98351 | 2014-05-07 08:05:50 +1000 | [diff] [blame] | 3984 | wake_up_all(&log->l_cilp->xc_commit_wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3985 | xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); |
| 3986 | |
| 3987 | #ifdef XFSERRORDEBUG |
| 3988 | { |
| 3989 | xlog_in_core_t *iclog; |
| 3990 | |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3991 | spin_lock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3992 | iclog = log->l_iclog; |
| 3993 | do { |
| 3994 | ASSERT(iclog->ic_callback == 0); |
| 3995 | iclog = iclog->ic_next; |
| 3996 | } while (iclog != log->l_iclog); |
Eric Sandeen | b22cd72c | 2007-10-11 17:37:10 +1000 | [diff] [blame] | 3997 | spin_unlock(&log->l_icloglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3998 | } |
| 3999 | #endif |
| 4000 | /* return non-zero if log IOERROR transition had already happened */ |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 4001 | return retval; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4002 | } |
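/*
 * Illustrative sketch (not part of the original source): roughly how a
 * shutdown path such as xfs_force_shutdown() decides the "logerror" argument
 * before calling xfs_log_force_umount().  Only when the shutdown was
 * triggered by a log IO error is the pre-shutdown log flush skipped, since
 * issuing further log IO would be pointless.  The wrapper name is
 * hypothetical; SHUTDOWN_LOG_IO_ERROR is the existing XFS shutdown flag.
 */
static inline int
xfs_sketch_shut_down_log(
	struct xfs_mount	*mp,
	int			flags)		/* SHUTDOWN_* flags */
{
	int	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	return xfs_log_force_umount(mp, logerror);
}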
| 4003 | |
Christoph Hellwig | ba0f32d | 2005-06-21 15:36:52 +1000 | [diff] [blame] | 4004 | STATIC int |
Mark Tinguely | 9a8d2fd | 2012-06-14 09:22:16 -0500 | [diff] [blame] | 4005 | xlog_iclogs_empty( |
| 4006 | struct xlog *log) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4007 | { |
| 4008 | xlog_in_core_t *iclog; |
| 4009 | |
| 4010 | iclog = log->l_iclog; |
| 4011 | do { |
| 4012 | /* endianness does not matter here, zero is zero in |
| 4013 | * any language. |
| 4014 | */ |
| 4015 | if (iclog->ic_header.h_num_logops) |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 4016 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4017 | iclog = iclog->ic_next; |
| 4018 | } while (iclog != log->l_iclog); |
Jesper Juhl | 014c254 | 2006-01-15 02:37:08 +0100 | [diff] [blame] | 4019 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4020 | } |
Dave Chinner | f661f1e | 2012-10-08 21:56:02 +1100 | [diff] [blame] | 4021 | |
Brian Foster | a45086e | 2015-10-12 15:59:25 +1100 | [diff] [blame] | 4022 | /* |
| 4023 | * Verify that an LSN stamped into a piece of metadata is valid. This is |
| 4024 | * intended for use in read verifiers on v5 superblocks. |
| 4025 | */ |
| 4026 | bool |
| 4027 | xfs_log_check_lsn( |
| 4028 | struct xfs_mount *mp, |
| 4029 | xfs_lsn_t lsn) |
| 4030 | { |
| 4031 | struct xlog *log = mp->m_log; |
| 4032 | bool valid; |
| 4033 | |
| 4034 | /* |
| 4035 | * norecovery mode skips mount-time log processing and unconditionally |
| 4036 | * resets the in-core LSN. We can't validate in this mode, but |
| 4037 | 	 * modifications are not allowed anyway, so just return true. |
| 4038 | */ |
| 4039 | if (mp->m_flags & XFS_MOUNT_NORECOVERY) |
| 4040 | return true; |
| 4041 | |
| 4042 | /* |
| 4043 | * Some metadata LSNs are initialized to NULL (e.g., the agfl). This is |
| 4044 | * handled by recovery and thus safe to ignore here. |
| 4045 | */ |
| 4046 | if (lsn == NULLCOMMITLSN) |
| 4047 | return true; |
| 4048 | |
| 4049 | valid = xlog_valid_lsn(mp->m_log, lsn); |
| 4050 | |
| 4051 | /* warn the user about what's gone wrong before verifier failure */ |
| 4052 | if (!valid) { |
| 4053 | spin_lock(&log->l_icloglock); |
| 4054 | xfs_warn(mp, |
| 4055 | "Corruption warning: Metadata has LSN (%d:%d) ahead of current LSN (%d:%d). " |
| 4056 | "Please unmount and run xfs_repair (>= v4.3) to resolve.", |
| 4057 | CYCLE_LSN(lsn), BLOCK_LSN(lsn), |
| 4058 | log->l_curr_cycle, log->l_curr_block); |
| 4059 | spin_unlock(&log->l_icloglock); |
| 4060 | } |
| 4061 | |
| 4062 | return valid; |
| 4063 | } |
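/*
 * Illustrative sketch (not part of the original source): how a metadata read
 * verifier might use xfs_log_check_lsn().  On-disk LSN fields are stored
 * big-endian, so they are converted before the check; a false return would
 * normally cause the verifier to flag the buffer as corrupt.  The helper
 * name is hypothetical.
 */
static inline bool
xfs_sketch_check_disk_lsn(
	struct xfs_mount	*mp,
	__be64			disk_lsn)	/* LSN field as found on disk */
{
	return xfs_log_check_lsn(mp, be64_to_cpu(disk_lsn));
}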