Thomas Gleixner | 2522fe4 | 2019-05-28 09:57:20 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 2 | /****************************************************************************** |
| 3 | ******************************************************************************* |
| 4 | ** |
| 5 | ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. |
David Teigland | 60f98d1 | 2011-11-02 14:30:58 -0500 | [diff] [blame] | 6 | ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 7 | ** |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 8 | ** |
| 9 | ******************************************************************************* |
| 10 | ******************************************************************************/ |
| 11 | |
| 12 | #include "dlm_internal.h" |
| 13 | #include "lockspace.h" |
| 14 | #include "member.h" |
| 15 | #include "dir.h" |
| 16 | #include "ast.h" |
| 17 | #include "recover.h" |
| 18 | #include "lowcomms.h" |
| 19 | #include "lock.h" |
| 20 | #include "requestqueue.h" |
| 21 | #include "recoverd.h" |
| 22 | |
| 23 | |
| 24 | /* If the start for which we're re-enabling locking (seq) has been superseded |
David Teigland | c36258b | 2007-09-27 15:53:38 -0500 | [diff] [blame] | 25 | by a newer stop (ls_recover_seq), we need to leave locking disabled. |
| 26 | |
| 27 | We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees |
| 28 | locking stopped and b) adds a message to the requestqueue, but dlm_recoverd |
| 29 | enables locking and clears the requestqueue between a and b. */ |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 30 | |
/* Re-enable normal locking for the lockspace after a recovery cycle that
   was started for sequence number 'seq'.  Returns 0 on success, or -EINTR
   if ls_recover_seq no longer matches seq (a newer stop superseded this
   start), in which case locking stays disabled and another recovery cycle
   will follow.  Lock ordering: ls_recv_active is taken outside
   ls_recover_lock; see the comment above for the dlm_recv race this
   write-lock prevents. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	/* suspend dlm_recv while LSFL_RUNNING and the requestqueue state
	   are brought back in sync */
	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}
| 50 | |
/* Run one full recovery cycle for the lockspace, driven by the parameters
   in 'rv' (member list and sequence number from dlm_ls_start).  Called
   only from dlm_recoverd via do_ls_recovery; serialized against
   dlm_recoverd_suspend/resume by ls_recoverd_active.  Returns 0 when the
   cycle completed and locking was re-enabled, or a negative error —
   commonly because a newer stop aborted this cycle, in which case a fresh
   cycle with a newer seq will follow. */

static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

	/* defer delivery of asts/basts to users until dlm_callback_resume */
	dlm_callback_suspend(ls);

	dlm_clear_toss(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 * 'neg' is set to the number of departed nodes.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_rinfo(ls, "dlm_recover_members error %d", error);
		goto fail;
	}

	dlm_recover_dir_nodeid(ls);

	/* reset per-cycle recovery statistics reported below */
	ls->ls_recover_dir_sent_res = 0;
	ls->ls_recover_dir_sent_msg = 0;
	ls->ls_recover_locks_in = 0;

	dlm_set_recover_status(ls, DLM_RS_NODES);

	error = dlm_recover_members_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_members_wait error %d", error);
		goto fail;
	}

	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory error %d", error);
		goto fail;
	}

	dlm_set_recover_status(ls, DLM_RS_DIR);

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
		goto fail;
	}

	log_rinfo(ls, "dlm_recover_directory %u out %u messages",
		  ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);

	/* bail out early if a newer stop has already aborted this cycle */
	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_recover_purge(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_masters error %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks error %d", error);
			goto fail;
		}

		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}

		log_rinfo(ls, "dlm_recover_locks %u in",
			  ls->ls_recover_locks_in);

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);

	error = dlm_recover_done_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_done_wait error %d", error);
		goto fail;
	}

	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	/* start delivering the asts/basts deferred at the top of this cycle */
	dlm_callback_resume(ls);

	/* fails with -EINTR if a newer stop superseded this start */
	error = enable_locking(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "enable_locking error %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_rinfo(ls, "dlm_process_requestqueue error %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_waiters_post error %d", error);
		goto fail;
	}

	dlm_recover_grant(ls);

	log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms",
		  (unsigned long long)rv->seq, ls->ls_generation,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	dlm_lsop_recover_done(ls);
	return 0;

 fail:
	dlm_release_root_list(ls);
	log_rinfo(ls, "dlm_recover %llu error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}
| 251 | |
David Teigland | 2cdc98a | 2006-10-31 11:56:08 -0600 | [diff] [blame] | 252 | /* The dlm_ls_start() that created the rv we take here may already have been |
| 253 | stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP |
| 254 | flag set. */ |
| 255 | |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 256 | static void do_ls_recovery(struct dlm_ls *ls) |
| 257 | { |
| 258 | struct dlm_recover *rv = NULL; |
| 259 | |
| 260 | spin_lock(&ls->ls_recover_lock); |
| 261 | rv = ls->ls_recover_args; |
| 262 | ls->ls_recover_args = NULL; |
David Teigland | 2cdc98a | 2006-10-31 11:56:08 -0600 | [diff] [blame] | 263 | if (rv && ls->ls_recover_seq == rv->seq) |
David Teigland | 475f230 | 2012-08-02 11:08:21 -0500 | [diff] [blame] | 264 | clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 265 | spin_unlock(&ls->ls_recover_lock); |
| 266 | |
| 267 | if (rv) { |
| 268 | ls_recover(ls, rv); |
David Teigland | 60f98d1 | 2011-11-02 14:30:58 -0500 | [diff] [blame] | 269 | kfree(rv->nodes); |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 270 | kfree(rv); |
| 271 | } |
| 272 | } |
| 273 | |
/* Main loop of the per-lockspace recovery daemon kthread (arg is the
   lockspace).  Sleeps until LSFL_RECOVER_WORK (run a recovery cycle) or
   LSFL_RECOVER_DOWN (re-enter the in-recovery state) is set, or until
   kthread_stop() is called.  Returns -1 only if the lockspace lookup
   fails at startup, else 0 on normal stop. */

static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	/* start out in the in-recovery state; enable_locking() releases
	   ls_in_recovery when the first recovery cycle completes */
	down_write(&ls->ls_in_recovery);
	set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
	wake_up(&ls->ls_recover_lock_wait);

	while (1) {
		/*
		 * We call kthread_should_stop() after set_current_state().
		 * This is because it works correctly if kthread_stop() is
		 * called just before set_current_state().
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			break;
		}
		/* sleep only when there is no pending work or down request */
		if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
		    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			if (kthread_should_stop())
				break;
			schedule();
		}
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			down_write(&ls->ls_in_recovery);
			set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
			wake_up(&ls->ls_recover_lock_wait);
		}

		if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	/* don't exit while still write-holding ls_in_recovery */
	if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
		up_write(&ls->ls_in_recovery);

	dlm_put_lockspace(ls);
	return 0;
}
| 323 | |
David Teigland | e7fd417 | 2006-01-18 09:30:29 +0000 | [diff] [blame] | 324 | int dlm_recoverd_start(struct dlm_ls *ls) |
| 325 | { |
| 326 | struct task_struct *p; |
| 327 | int error = 0; |
| 328 | |
| 329 | p = kthread_run(dlm_recoverd, ls, "dlm_recoverd"); |
| 330 | if (IS_ERR(p)) |
| 331 | error = PTR_ERR(p); |
| 332 | else |
| 333 | ls->ls_recoverd_task = p; |
| 334 | return error; |
| 335 | } |
| 336 | |
/* Stop the recovery daemon thread; blocks until the kthread has exited. */
void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}
| 341 | |
/* Block dlm_recoverd from starting another recovery cycle.  The wake_up
   on ls_wait_general kicks any recovery step currently waiting there so
   the daemon can finish its cycle and release ls_recoverd_active, which
   we then hold until dlm_recoverd_resume(). */
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}
| 347 | |
/* Allow dlm_recoverd to run recovery cycles again; pairs with
   dlm_recoverd_suspend(). */
void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}
| 352 | |