/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;

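/*
 * Migration cookies tag a multi-message lockres transfer so the
 * receiver can tie the pieces back together; dlm_send_one_lockres()
 * allocates one whenever a lockres will not fit in a single network
 * message.
 */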
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

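/*
 * The current recovery target (reco.dead_node) and recovery master
 * (reco.new_master) may only be changed under dlm->spinlock, hence
 * the assert_spin_locked() in the setters below.
 */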
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

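/*
 * Work items are queued on dlm->work_list by the network message
 * handlers (which must not block) and are run here in process
 * context, where sleeping and further network calls are allowed.
 */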
/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;
	int tot=0;

	if (!dlm_joined(dlm))
		return;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}


/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

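/*
 * On the recovery master, each live node's progress through steps
 * 5-9 above is tracked in a dlm_reco_node_data state, which normally
 * advances INIT -> REQUESTING -> REQUESTED -> (RECEIVING) -> DONE ->
 * FINALIZE_SENT, or drops to DEAD if that node itself dies
 * mid-recovery.
 */
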
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
		case DLM_RECO_NODE_DATA_INIT:
			st = "init";
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			st = "requesting";
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			st = "dead";
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			st = "receiving";
			break;
		case DLM_RECO_NODE_DATA_REQUESTED:
			st = "requested";
			break;
		case DLM_RECO_NODE_DATA_DONE:
			st = "done";
			break;
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			st = "finalize-sent";
			break;
		default:
			st = "bad";
			break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
336
Kurt Hackele2faea42006-01-12 14:24:55 -0800337/* returns true when the recovery master has contacted us */
338static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
339{
340 int ready;
341 spin_lock(&dlm->spinlock);
342 ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
343 spin_unlock(&dlm->spinlock);
344 return ready;
345}
346
347/* returns true if node is no longer in the domain
348 * could be dead or just not joined */
349int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
350{
351 int dead;
352 spin_lock(&dlm->spinlock);
Kurt Hackelaba9aac2006-04-27 18:00:21 -0700353 dead = !test_bit(node, dlm->domain_map);
Kurt Hackele2faea42006-01-12 14:24:55 -0800354 spin_unlock(&dlm->spinlock);
355 return dead;
356}
357
/* returns true if node is no longer in the recovery map
 * could be dead or simply finished with recovery */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(0, "%s: waiting %dms for notification of "
		     "recovery of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(0, "%s: waiting indefinitely for notification "
		     "of recovery of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, dlm->dlm_reco_thread_task->pid,
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

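/*
 * A sketch of the caller pattern (the real callers live in the
 * dlmlock/dlmunlock paths, not here):
 *
 *	dlm_wait_for_recovery(dlm);
 *	status = dlmlock(...);	\/* no recovery in progress here *\/
 *
 * Note this ordering is advisory only; recovery may begin again right
 * after the wait returns, in which case the per-lockres RECOVERY flag
 * still protects the individual resources.
 */
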
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover! sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone. go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->dlm_reco_thread_task->pid,
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success! see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

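/*
 * Remastering, as run on the recovery master: build the per-node
 * status list, ask every live node for the dead node's lock state,
 * wait until every node reports DONE (or dies), then broadcast the
 * finalize message and kick the dlm thread to flush anything dirtied
 * along the way.
 */
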
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master. there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		switch (ndata->state) {
		case DLM_RECO_NODE_DATA_INIT:
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
		case DLM_RECO_NODE_DATA_REQUESTED:
			BUG();
			break;
		case DLM_RECO_NODE_DATA_DEAD:
			mlog(0, "node %u died after requesting "
			     "recovery info for node %u\n",
			     ndata->node_num, dead_node);
			/* fine. don't need this node's info.
			 * continue without it. */
			break;
		case DLM_RECO_NODE_DATA_REQUESTING:
			ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
			mlog(0, "now receiving recovery data from "
			     "node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		case DLM_RECO_NODE_DATA_RECEIVING:
			mlog(0, "already receiving recovery data from "
			     "node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		case DLM_RECO_NODE_DATA_DONE:
			mlog(0, "already DONE receiving recovery data "
			     "from node %u for dead node %u\n",
			     ndata->node_num, dead_node);
			break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry (iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(ML_ERROR, "bad ndata state for "
				     "node %u: state=%d\n",
				     ndata->node_num, ndata->state);
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after "
				     "requesting recovery info for "
				     "node %u\n", ndata->node_num,
				     dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
				mlog(0, "%s: node %u still in state %s\n",
				     dlm->name, ndata->node_num,
				     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
				     "receiving" : "requested");
				all_nodes_done = 0;
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "%s: node %u state is done\n",
				     dlm->name, ndata->node_num);
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(0, "%s: node %u state is finalize\n",
				     dlm->name, ndata->node_num);
				break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					kthread_should_stop(),
					msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	mlog_exit(status);
	return status;
}

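/*
 * The "recovery area" is the list of dlm_reco_node_data entries hung
 * off dlm->reco.node_data, one per node in the domain map at the
 * time recovery began (the dead node excluded).
 */
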
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;

}

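/*
 * Message flow for lock-state transfer: the recovery master sends
 * DLM_LOCK_REQUEST_MSG (above), the handler below queues a work item,
 * and the worker answers with one DLM_MIG_LOCKRES_MSG per lock
 * resource followed by a DLM_RECO_DATA_DONE_MSG.
 */
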
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm); /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died. if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each(iter, &resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		if (!dlm_is_host_down(ret)) {
			mlog_errno(ret);
			mlog(ML_ERROR, "%s: unknown error sending data-done "
			     "to %u\n", dlm->name, send_to);
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
		/* should have moved beyond INIT but not to FINALIZE yet */
		case DLM_RECO_NODE_DATA_INIT:
		case DLM_RECO_NODE_DATA_DEAD:
		case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			mlog(ML_ERROR, "bad ndata state for node %u:"
			     " state=%d\n", ndata->node_num,
			     ndata->state);
			BUG();
			break;
		/* these states are possible at this point, anywhere along
		 * the line of recovery */
		case DLM_RECO_NODE_DATA_DONE:
		case DLM_RECO_NODE_DATA_RECEIVING:
		case DLM_RECO_NODE_DATA_REQUESTED:
		case DLM_RECO_NODE_DATA_REQUESTING:
			mlog(0, "node %u is DONE sending "
			     "recovery data!\n",
			     ndata->node_num);

			ndata->state = DLM_RECO_NODE_DATA_DONE;
			ret = 0;
			break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

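/* count the locks on all three queues.  the queue++ stepping below
 * relies on granted, converting and blocked being laid out
 * consecutively inside dlm_lock_resource, the same assumption that
 * dlm_list_num_to_pointer() makes further down. */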
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}


/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (!dlm_lvb_is_empty(mres->lvb) &&
			    (ml->type == LKM_EXMODE ||
			     memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}


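/*
 * Packing scheme: locks are appended to the page-sized mres buffer
 * via dlm_add_lock_to_array(); whenever the buffer fills (or the
 * queues are exhausted) it is flushed with dlm_send_mig_lockres_msg(),
 * which also rearms the buffer for the next batch.
 */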
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

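/*
 * Handler flow: look up (or allocate) the lockres, mark it
 * RECOVERING or MIGRATING under its spinlock, then queue the actual
 * lock processing for dlm_mig_lockres_worker(), since a message
 * handler must not block.
 */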
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kcalloc(1, sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);

	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm); /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	mlog_exit(ret);
	return ret;
}

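/*
 * Process-context half of the migrate/recovery receive path: requery
 * the master if it was unknown, replay the received lock state into
 * the local lockres, and, on the final message of a migration,
 * finish the hand-off.
 */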
1432static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1433{
1434 struct dlm_ctxt *dlm = data;
1435 struct dlm_migratable_lockres *mres;
1436 int ret = 0;
1437 struct dlm_lock_resource *res;
1438 u8 real_master;
1439
1440 dlm = item->dlm;
1441 mres = (struct dlm_migratable_lockres *)data;
1442
1443 res = item->u.ml.lockres;
1444 real_master = item->u.ml.real_master;
1445
1446 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1447 /* this case is super-rare. only occurs if
1448 * node death happens during migration. */
1449again:
1450 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1451 if (ret < 0) {
Kurt Hackele2faea42006-01-12 14:24:55 -08001452 mlog(0, "dlm_lockres_master_requery ret=%d\n",
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001453 ret);
1454 goto again;
1455 }
1456 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1457 mlog(0, "lockres %.*s not claimed. "
1458 "this node will take it.\n",
1459 res->lockname.len, res->lockname.name);
1460 } else {
1461 mlog(0, "master needs to respond to sender "
1462 "that node %u still owns %.*s\n",
1463 real_master, res->lockname.len,
1464 res->lockname.name);
1465 /* cannot touch this lockres */
1466 goto leave;
1467 }
1468 }
1469
1470 ret = dlm_process_recovery_data(dlm, res, mres);
1471 if (ret < 0)
1472 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1473 else
1474 mlog(0, "dlm_process_recovery_data succeeded\n");
1475
1476 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1477 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1478 ret = dlm_finish_migration(dlm, res, mres->master);
1479 if (ret < 0)
1480 mlog_errno(ret);
1481 }
1482
1483leave:
1484 kfree(data);
1485 mlog_exit(ret);
1486}
1487
1488
1489
Adrian Bunk8169cae2006-03-31 16:53:55 +02001490static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1491 struct dlm_lock_resource *res,
1492 u8 *real_master)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001493{
1494 struct dlm_node_iter iter;
1495 int nodenum;
1496 int ret = 0;
1497
1498 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1499
1500 /* we only reach here if one of the two nodes in a
1501 * migration died while the migration was in progress.
1502 * at this point we need to requery the master. we
1503 * know that the new_master got as far as creating
1504 * an mle on at least one node, but we do not know
1505 * if any nodes had actually cleared the mle and set
1506 * the master to the new_master. the old master
1507 * is supposed to set the owner to UNKNOWN in the
1508 * event of a new_master death, so the only possible
1509 * responses that we can get from nodes here are
1510 * that the master is new_master, or that the master
1511 * is UNKNOWN.
1512 * if all nodes come back with UNKNOWN then we know
1513 * the lock needs remastering here.
1514 * if any node comes back with a valid master, check
1515 * to see if that master is the one that we are
1516 * recovering. if so, then the new_master died and
1517 * we need to remaster this lock. if not, then the
1518 * new_master survived and that node will respond to
1519 * other nodes about the owner.
1520 * if there is an owner, this node needs to dump this
1521 * lockres and alert the sender that this lockres
1522 * was rejected. */
1523 spin_lock(&dlm->spinlock);
1524 dlm_node_iter_init(dlm->domain_map, &iter);
1525 spin_unlock(&dlm->spinlock);
1526
1527 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1528 /* do not send to self */
1529 if (nodenum == dlm->node_num)
1530 continue;
1531 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1532 if (ret < 0) {
1533 mlog_errno(ret);
Kurt Hackelc03872f2006-03-06 14:08:49 -08001534 if (!dlm_is_host_down(ret))
1535 BUG();
1536 /* host is down, so answer for that node would be
1537 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001538 }
1539 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1540 mlog(0, "lock master is %u\n", *real_master);
1541 break;
1542 }
1543 }
1544 return ret;
1545}
1546
1547
Kurt Hackelc03872f2006-03-06 14:08:49 -08001548int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1549 u8 nodenum, u8 *real_master)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001550{
1551 int ret = -EINVAL;
1552 struct dlm_master_requery req;
1553 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1554
1555 memset(&req, 0, sizeof(req));
1556 req.node_idx = dlm->node_num;
1557 req.namelen = res->lockname.len;
1558 memcpy(req.name, res->lockname.name, res->lockname.len);
1559
1560 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1561 &req, sizeof(req), nodenum, &status);
1562 /* XXX: negative status not handled properly here. */
1563 if (ret < 0)
1564 mlog_errno(ret);
1565 else {
1566 BUG_ON(status < 0);
1567 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1568 *real_master = (u8) (status & 0xff);
1569 mlog(0, "node %u responded to master requery with %u\n",
1570 nodenum, *real_master);
1571 ret = 0;
1572 }
1573 return ret;
1574}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				/* log the actual error rather than a
				 * hardcoded errno; the dispatch can only
				 * fail on an allocation error */
				mlog_errno(ret);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

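/* map a queue index (0, 1, 2) to the matching list_head in the
 * lockres.  the pointer arithmetic relies on the granted, converting
 * and blocked list_heads being laid out contiguously, in that order,
 * in struct dlm_lock_resource. */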
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/

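/* unpack a dlm_migratable_lockres received during recovery or
 * migration and install each lock on the proper queue of the local
 * lockres.  locks owned by this node are only reordered (see NOTE
 * above); locks for other nodes are newly allocated here. */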
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, bad;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry (iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				u64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %u:%llu!\n",
				     dlm_get_lock_cookie_node(c),
				     dlm_get_lock_cookie_seq(c));
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *   1. order of locks on granted queue is
		 *      meaningless.
		 *   2. order of locks on converting queue is
		 *      LOST with the node death.  sorry charlie.
		 *   3. order of locks on the blocked queue is
		 *      also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				u64 c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(c),
				     dlm_get_lock_cookie_seq(c));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(ml->cookie),
				     dlm_get_lock_cookie_seq(ml->cookie),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	mlog_exit(ret);
	return ret;
}

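/* mark a lockres RECOVERING and put it on the recovery list (taking
 * a reference), then settle any lock, convert, unlock or cancel that
 * was still pending when the master died, so that a clean snapshot
 * of the queues can be handed to the new master. */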
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}


/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;

	struct dlm_lock_resource *res;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (!__dlm_lockres_unused(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
					dlm_lockres_put(res);
				}
				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				if (!__dlm_lockres_unused(res))
					__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

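/* decide whether a lock's presence means the cached lvb must be
 * thrown away.  for the local (non-master) case, anything below PR
 * means this node cannot trust its own copy; for the master case, a
 * dead node that held EX may have been changing the lvb. */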
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

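/* master-side cleanup: strip every lock the dead node held from all
 * three queues, dropping the list reference on each, then mark the
 * lockres dirty so the dlm thread revisits it after recovery. */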
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting. */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;


	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}

}

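/* handle a node death with dlm->spinlock held.  if the dead node was
 * the recovery master, unwind any half-finished recovery state, then
 * update the membership bitmaps, run local cleanup, and flag the
 * node for recovery. */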
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interested in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

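/* the $RECOVERY lock is special: its asts never fire on this node
 * (see dlm_pick_recovery_master below), so these callbacks exist
 * only to satisfy the dlmlock interface and leave a trace in the
 * logs. */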
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck.  this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}

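/* broadcast the identity of the dead node to every live node in the
 * domain.  a node that is itself still finalizing a previous
 * recovery answers EAGAIN, in which case we back off and retry until
 * the whole cluster agrees that this recovery session can begin. */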
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     " returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		} else if (ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			/* TODO Look into replacing msleep with cond_resched() */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		return EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

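/* recovery finalization is a two-stage broadcast.  stage 1 tells
 * every node to finish its local recovery for the dead node and to
 * set DLM_RECO_STATE_FINALIZE; stage 2 clears that state and resets
 * the recovery fields.  the intermediate FINALIZE state is what lets
 * a node detect a recovery master that died between the two stages
 * (see __dlm_hb_node_down above). */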
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog_errno(ret);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}

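/* runs on every node other than the recovery master.  stage 1
 * installs the new master for all recovered lockres and enters the
 * FINALIZE state; stage 2 leaves FINALIZE, resets the recovery
 * state and kicks the recovery thread. */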
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		dlm_reset_recovery(dlm);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}