/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"


/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status. They need
 * to abort if the lockspace is stopped, indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result. A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure. This should only be called
 * by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
	struct dlm_ls *ls = (struct dlm_ls *) data;
	mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
	wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;

	init_timer(&ls->ls_timer);
	ls->ls_timer.function = dlm_wait_timer_fn;
	ls->ls_timer.data = (long) ls;
	ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
	add_timer(&ls->ls_timer);

	wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
	del_timer_sync(&ls->ls_timer);

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status. The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low). When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

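/*
 * Poll every lockspace member for its recovery status, backing off from
 * 20 ms up to 1 s between retries, until each node reports wait_status
 * (or recovery is stopped). When save_slots is set, the slot info from
 * each status reply is recorded via dlm_slot_save() for later use by
 * dlm_recover_members_wait().
 */
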
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

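/*
 * Used on nodes other than the low nodeid: poll the low nodeid, with the
 * same backoff as above, until it reports the accumulated wait_status
 * (an X_ALL flag). status_flags is passed through in the status request,
 * e.g. DLM_RSF_NEED_SLOTS to ask for slot info in the reply.
 */
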
static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

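/*
 * Combine the two polling schemes above. status_all relies on each
 * DLM_RS_x_ALL flag being defined as the corresponding DLM_RS_x flag
 * shifted left by one (see the DLM_RS_* definitions).
 */
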
static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}

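/*
 * Like wait_status(DLM_RS_NODES), but with slot handling: the low nodeid
 * collects slot info from every member while polling, assigns slots and a
 * new generation, then publishes DLM_RS_NODES_ALL; the other nodes ask the
 * low nodeid for the slot assignments (DLM_RSF_NEED_SLOTS) and copy them in.
 */
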
int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid. As replies are returned from the resource directories the
 * rsb's are removed from the list. When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

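/*
 * An rsb on the recover_list holds an extra reference (dlm_hold_rsb),
 * dropped again when it is removed from the list.
 */
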
static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r = NULL;

	spin_lock(&ls->ls_recover_list_lock);

	list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
		if (id == (unsigned long) r)
			goto out;
	}
	r = NULL;
 out:
	spin_unlock(&ls->ls_recover_list_lock);
	return r;
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup        ->  receive_rcom_lookup
                                   dlm_dir_lookup
   receive_rcom_lookup_reply   <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue)
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
			lkb->lkb_nodeid = nodeid;
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
	lock_rsb(r);
	r->res_nodeid = nodeid;
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
	unlock_rsb(r);
}

/*
 * We do async lookups on rsb's that need new masters. The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

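/*
 * If we are the directory node for r, look up the new master locally and
 * set it right away; otherwise put r on the recover_list and send an
 * asynchronous lookup to the directory node (the reply is handled by
 * dlm_recover_master_reply).
 */
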
static int recover_master(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
				       r->res_length, &ret_nodeid);
		if (error)
			log_error(ls, "recover dir lookup error %d", error);

		if (ret_nodeid == our_nodeid)
			ret_nodeid = 0;
		set_new_master(r, ret_nodeid);
	} else {
		recover_list_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	return error;
}

/*
 * When not using a directory, most resource names will hash to a new static
 * master nodeid and the resource will need to be remastered.
 */

static int recover_master_static(struct dlm_rsb *r)
{
	int master = dlm_dir_nodeid(r);

	if (master == dlm_our_nodeid())
		master = 0;

	if (r->res_nodeid != master) {
		if (is_master(r))
			dlm_purge_mstcpy_locks(r);
		set_new_master(r, master);
		return 1;
	}
	return 0;
}

/*
 * Go through local root resources and, for each rsb whose master has
 * departed, get the new master nodeid from the directory. The dir will
 * assign mastery to the first node to look up the new master. That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error = 0, count = 0;

	log_debug(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		if (dlm_no_directory(ls))
			count += recover_master_static(r);
		else if (!is_master(r) &&
			 (dlm_is_removed(ls, r->res_nodeid) ||
			  rsb_flag(r, RSB_NEW_MASTER))) {
			recover_master(r);
			count++;
		}

		schedule();
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_masters %d resources", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int nodeid;

	r = recover_list_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	nodeid = rc->rc_result;
	if (nodeid == dlm_our_nodeid())
		nodeid = 0;

	set_new_master(r, nodeid);
	recover_list_del(r);

	if (recover_list_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock          ->  receive_rcom_lock
                                   dlm_recover_master_copy
   receive_rcom_lock_reply     <-
   dlm_recover_process_copy
*/


/*
 * keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

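/*
 * Send the locks on r's grant/convert/wait queues to the new master. If
 * any were sent, r stays on the recover_list until dlm_recovered_lock()
 * has seen a reply for each of them.
 */
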
static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	log_debug(ls, "dlm_recover_locks");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_locks %d locks", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

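/*
 * Called as replies arrive for the locks sent by recover_locks(). When the
 * count for r reaches zero, r is fully remastered and comes off the
 * recover_list; dlm_recover_locks() is woken once the list is empty.
 */
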
void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's. This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb. If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with
 * mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

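	/*
	 * Scan the granted and converting locks that use an lvb. The
	 * sequence numbers are compared as a signed difference so that
	 * lkb_lvbseq can wrap around.
	 */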
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	/* don't mess with the lvb unless we're the new master */
	if (!rsb_flag(r, RSB_NEW_MASTER2))
		goto out;

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

/* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1)
			lkb->lkb_grmode = lkb->lkb_rqmode;
		else
			lkb->lkb_grmode = grmode;
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_grant_after_purge() due to locks that may have
   existed from a removed node. */

static void set_locks_purged(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_LOCKS_PURGED);
}

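/*
 * For each rsb we master: fix up in-progress conversions, mark rsbs we
 * newly master so waiting/converting locks can be re-examined
 * (set_locks_purged), and recover the lvb. The RECOVER_CONVERT and
 * NEW_MASTER2 flags are cleared on every rsb.
 */
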
void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int count = 0;

	log_debug(ls, "dlm_recover_rsbs");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);
			if (rsb_flag(r, RSB_NEW_MASTER2))
				set_locks_purged(r);
			recover_lvb(r);
			count++;
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		/* If we're using a directory, add tossed rsbs to the root
		   list; they'll have entries created in the new directory,
		   but no other recovery steps should do anything with them. */

		if (dlm_no_directory(ls)) {
			spin_unlock(&ls->ls_rsbtbl[i].lock);
			continue;
		}

		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

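/* Drop the references taken by dlm_create_root_list() and empty the list */
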
void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

/* If not using a directory, clear the entire toss list; there's no benefit to
   caching the master value since it's fixed. If we are using a dir, keep the
   rsb's we're the master of. Recovery will add them to the root list and from
   there they'll be entered in the rebuilt directory. */

void dlm_clear_toss_list(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *rsb;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (dlm_no_directory(ls) || !is_master(rsb)) {
				rb_erase(n, &ls->ls_rsbtbl[i].toss);
				dlm_free_rsb(rsb);
			}
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}
