/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;

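/* Migration cookies tie together the multiple network messages needed when a
 * single lockres does not fit in one dlm_migratable_lockres packet (see
 * dlm_send_one_lockres below).  The counter wraps from ~0 back to 1, so a
 * cookie of zero is never handed out. */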
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
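/* Runs from the dispatched workqueue: splice everything off dlm->work_list
 * under work_lock, then run each work function outside the lock so it is
 * free to sleep and send network messages. */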
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}


/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 *
 */

static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

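/* How long the recovery thread sleeps between checks for work when nothing
 * wakes it up sooner. */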
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if the given node has been recovered,
 * i.e. it is no longer set in the recovery map */
int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}


int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(0, "%s: waiting %dms for notification of "
		     "recovery of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(0, "%s: waiting indefinitely for notification "
		     "of recovery of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, dlm->dlm_reco_thread_task->pid,
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

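/* One pass of the recovery thread: pick a dead node out of the recovery map,
 * become (or wait for) the recovery master, and remaster that node's locks.
 * Returns -EAGAIN after a completed pass so the caller rechecks for more
 * dead nodes, 0 when there is nothing to do or another node is mastering. */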
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover! sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->dlm_reco_thread_task->pid,
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

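/* Run as the recovery master: build the per-node recovery area, ask every
 * live node for the dead node's lock state, wait until all of them report
 * DONE, then send the finalize message and mark the local lock resources as
 * recovered. */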
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry (iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	mlog_exit(status);
	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

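/* Work-queue side of DLM_LOCK_REQUEST_MSG: push every lockres the dead node
 * owned (or whose owner is unknown) to the recovery master, one
 * migratable-lockres page at a time, then send the all-done message unless
 * the master itself went down along the way. */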
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each(iter, &resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}
939
940
941static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
942{
943 int ret, tmpret;
944 struct dlm_reco_data_done done_msg;
945
946 memset(&done_msg, 0, sizeof(done_msg));
947 done_msg.node_idx = dlm->node_num;
948 done_msg.dead_node = dead_node;
949 mlog(0, "sending DATA DONE message to %u, "
950 "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
951 done_msg.dead_node);
952
953 ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
954 sizeof(done_msg), send_to, &tmpret);
Kurt Hackel29c0fa02006-04-27 18:06:58 -0700955 if (ret < 0) {
956 if (!dlm_is_host_down(ret)) {
957 mlog_errno(ret);
958 mlog(ML_ERROR, "%s: unknown error sending data-done "
959 "to %u\n", dlm->name, send_to);
960 BUG();
961 }
962 } else
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800963 ret = tmpret;
964 return ret;
965}
966
967
968int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
969{
970 struct dlm_ctxt *dlm = data;
971 struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
972 struct list_head *iter;
973 struct dlm_reco_node_data *ndata = NULL;
974 int ret = -EINVAL;
975
976 if (!dlm_grab(dlm))
977 return -EINVAL;
978
979 mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
980 "node_idx=%u, this node=%u\n", done->dead_node,
981 dlm->reco.dead_node, done->node_idx, dlm->node_num);
Kurt Hackeld6dea6e2006-04-27 18:08:51 -0700982
983 mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
984 "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
985 "node_idx=%u, this node=%u\n", done->dead_node,
986 dlm->reco.dead_node, done->node_idx, dlm->node_num);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800987
988 spin_lock(&dlm_reco_state_lock);
989 list_for_each(iter, &dlm->reco.node_data) {
990 ndata = list_entry (iter, struct dlm_reco_node_data, list);
991 if (ndata->node_num != done->node_idx)
992 continue;
993
994 switch (ndata->state) {
Kurt Hackele2faea42006-01-12 14:24:55 -0800995 /* should have moved beyond INIT but not to FINALIZE yet */
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800996 case DLM_RECO_NODE_DATA_INIT:
997 case DLM_RECO_NODE_DATA_DEAD:
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800998 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
999 mlog(ML_ERROR, "bad ndata state for node %u:"
1000 " state=%d\n", ndata->node_num,
1001 ndata->state);
1002 BUG();
1003 break;
Kurt Hackele2faea42006-01-12 14:24:55 -08001004 /* these states are possible at this point, anywhere along
1005 * the line of recovery */
1006 case DLM_RECO_NODE_DATA_DONE:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001007 case DLM_RECO_NODE_DATA_RECEIVING:
1008 case DLM_RECO_NODE_DATA_REQUESTED:
1009 case DLM_RECO_NODE_DATA_REQUESTING:
1010 mlog(0, "node %u is DONE sending "
1011 "recovery data!\n",
1012 ndata->node_num);
1013
1014 ndata->state = DLM_RECO_NODE_DATA_DONE;
1015 ret = 0;
1016 break;
1017 }
1018 }
1019 spin_unlock(&dlm_reco_state_lock);
1020
1021 /* wake the recovery thread, some node is done */
1022 if (!ret)
1023 dlm_kick_recovery_thread(dlm);
1024
1025 if (ret < 0)
1026 mlog(ML_ERROR, "failed to find recovery node data for node "
1027 "%u\n", done->node_idx);
1028 dlm_put(dlm);
1029
1030 mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1031 return ret;
1032}
1033
1034static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1035 struct list_head *list,
1036 u8 dead_node)
1037{
1038 struct dlm_lock_resource *res;
1039 struct list_head *iter, *iter2;
Kurt Hackele2faea42006-01-12 14:24:55 -08001040 struct dlm_lock *lock;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001041
1042 spin_lock(&dlm->spinlock);
1043 list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1044 res = list_entry (iter, struct dlm_lock_resource, recovering);
Kurt Hackele2faea42006-01-12 14:24:55 -08001045 /* always prune any $RECOVERY entries for dead nodes,
1046 * otherwise hangs can occur during later recovery */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001047 if (dlm_is_recovery_lock(res->lockname.name,
Kurt Hackele2faea42006-01-12 14:24:55 -08001048 res->lockname.len)) {
1049 spin_lock(&res->spinlock);
1050 list_for_each_entry(lock, &res->granted, list) {
1051 if (lock->ml.node == dead_node) {
1052 mlog(0, "AHA! there was "
1053 "a $RECOVERY lock for dead "
1054 "node %u (%s)!\n",
1055 dead_node, dlm->name);
1056 list_del_init(&lock->list);
1057 dlm_lock_put(lock);
1058 break;
1059 }
1060 }
1061 spin_unlock(&res->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001062 continue;
Kurt Hackele2faea42006-01-12 14:24:55 -08001063 }
1064
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001065 if (res->owner == dead_node) {
1066 mlog(0, "found lockres owned by dead node while "
1067 "doing recovery for node %u. sending it.\n",
1068 dead_node);
Akinobu Mitaf1166292006-06-26 00:24:46 -07001069 list_move_tail(&res->recovering, list);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001070 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1071 mlog(0, "found UNKNOWN owner while doing recovery "
1072 "for node %u. sending it.\n", dead_node);
Akinobu Mitaf1166292006-06-26 00:24:46 -07001073 list_move_tail(&res->recovering, list);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001074 }
1075 }
1076 spin_unlock(&dlm->spinlock);
1077}
1078
1079static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1080{
1081 int total_locks = 0;
1082 struct list_head *iter, *queue = &res->granted;
1083 int i;
1084
1085 for (i=0; i<3; i++) {
1086 list_for_each(iter, queue)
1087 total_locks++;
1088 queue++;
1089 }
1090 return total_locks;
1091}
1092
1093
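/* Flush the locks accumulated so far in mres to send_to, tagging the message
 * with DLM_MRES_ALL_DONE once the final lock of this lockres is included,
 * then reset the buffer for the next batch. */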
1094static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1095 struct dlm_migratable_lockres *mres,
1096 u8 send_to,
1097 struct dlm_lock_resource *res,
1098 int total_locks)
1099{
1100 u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1101 int mres_total_locks = be32_to_cpu(mres->total_locks);
1102 int sz, ret = 0, status = 0;
1103 u8 orig_flags = mres->flags,
1104 orig_master = mres->master;
1105
1106 BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1107 if (!mres->num_locks)
1108 return 0;
1109
1110 sz = sizeof(struct dlm_migratable_lockres) +
1111 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1112
1113 /* add an all-done flag if we reached the last lock */
1114 orig_flags = mres->flags;
1115 BUG_ON(total_locks > mres_total_locks);
1116 if (total_locks == mres_total_locks)
1117 mres->flags |= DLM_MRES_ALL_DONE;
1118
1119 /* send it */
1120 ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1121 sz, send_to, &status);
1122 if (ret < 0) {
1123 /* XXX: negative status is not handled.
1124 * this will end up killing this node. */
1125 mlog_errno(ret);
1126 } else {
1127 /* might get an -ENOMEM back here */
1128 ret = status;
1129 if (ret < 0) {
1130 mlog_errno(ret);
1131
1132 if (ret == -EFAULT) {
1133 mlog(ML_ERROR, "node %u told me to kill "
1134 "myself!\n", send_to);
1135 BUG();
1136 }
1137 }
1138 }
1139
1140 /* zero and reinit the message buffer */
1141 dlm_init_migratable_lockres(mres, res->lockname.name,
1142 res->lockname.len, mres_total_locks,
1143 mig_cookie, orig_flags, orig_master);
1144 return ret;
1145}
1146
1147static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1148 const char *lockname, int namelen,
1149 int total_locks, u64 cookie,
1150 u8 flags, u8 master)
1151{
1152 /* mres here is one full page */
1153 memset(mres, 0, PAGE_SIZE);
1154 mres->lockname_len = namelen;
1155 memcpy(mres->lockname, lockname, namelen);
1156 mres->num_locks = 0;
1157 mres->total_locks = cpu_to_be32(total_locks);
1158 mres->mig_cookie = cpu_to_be64(cookie);
1159 mres->flags = flags;
1160 mres->master = master;
1161}
1162
1163
1164/* returns 1 if this lock fills the network structure,
1165 * 0 otherwise */
1166static int dlm_add_lock_to_array(struct dlm_lock *lock,
1167 struct dlm_migratable_lockres *mres, int queue)
1168{
1169 struct dlm_migratable_lock *ml;
1170 int lock_num = mres->num_locks;
1171
1172 ml = &(mres->ml[lock_num]);
1173 ml->cookie = lock->ml.cookie;
1174 ml->type = lock->ml.type;
1175 ml->convert_type = lock->ml.convert_type;
1176 ml->highest_blocked = lock->ml.highest_blocked;
1177 ml->list = queue;
1178 if (lock->lksb) {
1179 ml->flags = lock->lksb->flags;
1180 /* send our current lvb */
1181 if (ml->type == LKM_EXMODE ||
1182 ml->type == LKM_PRMODE) {
1183 /* if it is already set, this had better be a PR
1184 * and it has to match */
Kurt Hackel8bc674c2006-04-27 18:02:10 -07001185 if (!dlm_lvb_is_empty(mres->lvb) &&
1186 (ml->type == LKM_EXMODE ||
1187 memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001188 mlog(ML_ERROR, "mismatched lvbs!\n");
1189 __dlm_print_one_lock_resource(lock->lockres);
1190 BUG();
1191 }
1192 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1193 }
1194 }
1195 ml->node = lock->ml.node;
1196 mres->num_locks++;
1197 /* we reached the max, send this network message */
1198 if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1199 return 1;
1200 return 0;
1201}
1202
1203
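/* Walk all three lock queues (granted, converting, blocked) of a lockres and
 * ship every lock to send_to, flushing a full dlm_migratable_lockres page
 * whenever it fills up. */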
1204int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1205 struct dlm_migratable_lockres *mres,
1206 u8 send_to, u8 flags)
1207{
1208 struct list_head *queue, *iter;
1209 int total_locks, i;
1210 u64 mig_cookie = 0;
1211 struct dlm_lock *lock;
1212 int ret = 0;
1213
1214 BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1215
1216 mlog(0, "sending to %u\n", send_to);
1217
1218 total_locks = dlm_num_locks_in_lockres(res);
1219 if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1220 /* rare, but possible */
1221 mlog(0, "argh. lockres has %d locks. this will "
1222 "require more than one network packet to "
1223 "migrate\n", total_locks);
1224 mig_cookie = dlm_get_next_mig_cookie();
1225 }
1226
1227 dlm_init_migratable_lockres(mres, res->lockname.name,
1228 res->lockname.len, total_locks,
1229 mig_cookie, flags, res->owner);
1230
1231 total_locks = 0;
1232 for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1233 queue = dlm_list_idx_to_ptr(res, i);
1234 list_for_each(iter, queue) {
1235 lock = list_entry (iter, struct dlm_lock, list);
1236
1237 /* add another lock. */
1238 total_locks++;
1239 if (!dlm_add_lock_to_array(lock, mres, i))
1240 continue;
1241
1242 /* this filled the lock message,
1243 * we must send it immediately. */
1244 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1245 res, total_locks);
Kurt Hackel29c0fa02006-04-27 18:06:58 -07001246 if (ret < 0)
1247 goto error;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001248 }
1249 }
1250 /* flush any remaining locks */
1251 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
Kurt Hackel29c0fa02006-04-27 18:06:58 -07001252 if (ret < 0)
1253 goto error;
1254 return ret;
1255
1256error:
1257 mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1258 dlm->name, ret);
1259 if (!dlm_is_host_down(ret))
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001260 BUG();
Kurt Hackel29c0fa02006-04-27 18:06:58 -07001261 mlog(0, "%s: node %u went down while sending %s "
1262 "lockres %.*s\n", dlm->name, send_to,
1263 flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
1264 res->lockname.len, res->lockname.name);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001265 return ret;
1266}
1267
1268
1269
1270/*
1271 * this message will contain no more than one page worth of
1272 * recovery data, and it will work on only one lockres.
1273 * there may be many locks in this page, and we may need to wait
1274 * for additional packets to complete all the locks (rare, but
1275 * possible).
1276 */
1277/*
1278 * NOTE: the allocation error cases here are scary
1279 * we really cannot afford to fail an alloc in recovery
1280 * do we spin? returning an error only delays the problem really
1281 */
1282
1283int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
1284{
1285 struct dlm_ctxt *dlm = data;
1286 struct dlm_migratable_lockres *mres =
1287 (struct dlm_migratable_lockres *)msg->buf;
1288 int ret = 0;
1289 u8 real_master;
1290 char *buf = NULL;
1291 struct dlm_work_item *item = NULL;
1292 struct dlm_lock_resource *res = NULL;
1293
1294 if (!dlm_grab(dlm))
1295 return -EINVAL;
1296
1297 BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1298
1299 real_master = mres->master;
1300 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1301 /* cannot migrate a lockres with no master */
1302 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1303 }
1304
1305 mlog(0, "%s message received from node %u\n",
1306 (mres->flags & DLM_MRES_RECOVERY) ?
1307 "recovery" : "migration", mres->master);
1308 if (mres->flags & DLM_MRES_ALL_DONE)
1309 mlog(0, "all done flag. all lockres data received!\n");
1310
1311 ret = -ENOMEM;
Kurt Hackelad8100e2006-05-01 14:25:21 -07001312 buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1313 item = kcalloc(1, sizeof(*item), GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001314 if (!buf || !item)
1315 goto leave;
1316
1317 /* lookup the lock to see if we have a secondary queue for this
1318 * already... just add the locks in and this will have its owner
1319 * and RECOVERY flag changed when it completes. */
1320 res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1321 if (res) {
1322 /* this will get a ref on res */
1323 /* mark it as recovering/migrating and hash it */
1324 spin_lock(&res->spinlock);
1325 if (mres->flags & DLM_MRES_RECOVERY) {
1326 res->state |= DLM_LOCK_RES_RECOVERING;
1327 } else {
1328 if (res->state & DLM_LOCK_RES_MIGRATING) {
1329 /* this is at least the second
1330 * lockres message */
1331 mlog(0, "lock %.*s is already migrating\n",
1332 mres->lockname_len,
1333 mres->lockname);
1334 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1335 /* caller should BUG */
1336 mlog(ML_ERROR, "node is attempting to migrate "
1337 "lock %.*s, but marked as recovering!\n",
1338 mres->lockname_len, mres->lockname);
1339 ret = -EFAULT;
1340 spin_unlock(&res->spinlock);
1341 goto leave;
1342 }
1343 res->state |= DLM_LOCK_RES_MIGRATING;
1344 }
1345 spin_unlock(&res->spinlock);
1346 } else {
1347 /* need to allocate, just like if it was
1348 * mastered here normally */
1349 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1350 if (!res)
1351 goto leave;
1352
1353 /* to match the ref that we would have gotten if
1354 * dlm_lookup_lockres had succeeded */
1355 dlm_lockres_get(res);
1356
1357 /* mark it as recovering/migrating and hash it */
1358 if (mres->flags & DLM_MRES_RECOVERY)
1359 res->state |= DLM_LOCK_RES_RECOVERING;
1360 else
1361 res->state |= DLM_LOCK_RES_MIGRATING;
1362
1363 spin_lock(&dlm->spinlock);
1364 __dlm_insert_lockres(dlm, res);
1365 spin_unlock(&dlm->spinlock);
1366
1367 /* now that the new lockres is inserted,
1368 * make it usable by other processes */
1369 spin_lock(&res->spinlock);
1370 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1371 spin_unlock(&res->spinlock);
1372
1373 /* add an extra ref for just-allocated lockres
1374 * otherwise the lockres will be purged immediately */
1375 dlm_lockres_get(res);
1376
1377 }
1378
1379 /* at this point we have allocated everything we need,
1380 * and we have a hashed lockres with an extra ref and
1381 * the proper res->state flags. */
1382 ret = 0;
1383 if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1384 /* migration cannot have an unknown master */
1385 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1386 mlog(0, "recovery has passed me a lockres with an "
1387 "unknown owner.. will need to requery: "
1388 "%.*s\n", mres->lockname_len, mres->lockname);
1389 } else {
1390 spin_lock(&res->spinlock);
1391 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1392 spin_unlock(&res->spinlock);
1393 }
1394
1395 /* queue up work for dlm_mig_lockres_worker */
1396 dlm_grab(dlm); /* get an extra ref for the work item */
1397 memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */
1398 dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1399 item->u.ml.lockres = res; /* already have a ref */
1400 item->u.ml.real_master = real_master;
1401 spin_lock(&dlm->work_lock);
1402 list_add_tail(&item->list, &dlm->work_list);
1403 spin_unlock(&dlm->work_lock);
1404 schedule_work(&dlm->dispatched_work);
1405
1406leave:
1407 dlm_put(dlm);
1408 if (ret < 0) {
1409 if (buf)
1410 kfree(buf);
1411 if (item)
1412 kfree(item);
1413 }
1414
1415 mlog_exit(ret);
1416 return ret;
1417}
1418
1419
1420static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1421{
1422 struct dlm_ctxt *dlm = data;
1423 struct dlm_migratable_lockres *mres;
1424 int ret = 0;
1425 struct dlm_lock_resource *res;
1426 u8 real_master;
1427
1428 dlm = item->dlm;
1429 mres = (struct dlm_migratable_lockres *)data;
1430
1431 res = item->u.ml.lockres;
1432 real_master = item->u.ml.real_master;
1433
1434 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1435 /* this case is super-rare. only occurs if
1436 * node death happens during migration. */
1437again:
1438 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1439 if (ret < 0) {
Kurt Hackele2faea42006-01-12 14:24:55 -08001440 mlog(0, "dlm_lockres_master_requery ret=%d\n",
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001441 ret);
1442 goto again;
1443 }
1444 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1445 mlog(0, "lockres %.*s not claimed. "
1446 "this node will take it.\n",
1447 res->lockname.len, res->lockname.name);
1448 } else {
1449 mlog(0, "master needs to respond to sender "
1450 "that node %u still owns %.*s\n",
1451 real_master, res->lockname.len,
1452 res->lockname.name);
1453 /* cannot touch this lockres */
1454 goto leave;
1455 }
1456 }
1457
1458 ret = dlm_process_recovery_data(dlm, res, mres);
1459 if (ret < 0)
1460 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1461 else
1462 mlog(0, "dlm_process_recovery_data succeeded\n");
1463
1464 if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1465 (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1466 ret = dlm_finish_migration(dlm, res, mres->master);
1467 if (ret < 0)
1468 mlog_errno(ret);
1469 }
1470
1471leave:
1472 kfree(data);
1473 mlog_exit(ret);
1474}
1475
1476
1477
Kurt Hackelc03872f2006-03-06 14:08:49 -08001478int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1479 struct dlm_lock_resource *res, u8 *real_master)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001480{
1481 struct dlm_node_iter iter;
1482 int nodenum;
1483 int ret = 0;
1484
1485 *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1486
1487 /* we only reach here if one of the two nodes in a
1488 * migration died while the migration was in progress.
1489 * at this point we need to requery the master. we
1490 * know that the new_master got as far as creating
1491 * an mle on at least one node, but we do not know
1492 * if any nodes had actually cleared the mle and set
1493 * the master to the new_master. the old master
1494 * is supposed to set the owner to UNKNOWN in the
1495 * event of a new_master death, so the only possible
1496 * responses that we can get from nodes here are
1497 * that the master is new_master, or that the master
1498 * is UNKNOWN.
1499 * if all nodes come back with UNKNOWN then we know
1500 * the lock needs remastering here.
1501 * if any node comes back with a valid master, check
1502 * to see if that master is the one that we are
1503 * recovering. if so, then the new_master died and
1504 * we need to remaster this lock. if not, then the
1505 * new_master survived and that node will respond to
1506 * other nodes about the owner.
1507 * if there is an owner, this node needs to dump this
1508 * lockres and alert the sender that this lockres
1509 * was rejected. */
1510 spin_lock(&dlm->spinlock);
1511 dlm_node_iter_init(dlm->domain_map, &iter);
1512 spin_unlock(&dlm->spinlock);
1513
1514 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1515 /* do not send to self */
1516 if (nodenum == dlm->node_num)
1517 continue;
1518 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1519 if (ret < 0) {
1520 mlog_errno(ret);
Kurt Hackelc03872f2006-03-06 14:08:49 -08001521 if (!dlm_is_host_down(ret))
1522 BUG();
1523 /* host is down, so answer for that node would be
1524 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001525 }
1526 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1527 mlog(0, "lock master is %u\n", *real_master);
1528 break;
1529 }
1530 }
1531 return ret;
1532}
1533
1534
Kurt Hackelc03872f2006-03-06 14:08:49 -08001535int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1536 u8 nodenum, u8 *real_master)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001537{
1538 int ret = -EINVAL;
1539 struct dlm_master_requery req;
1540 int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1541
1542 memset(&req, 0, sizeof(req));
1543 req.node_idx = dlm->node_num;
1544 req.namelen = res->lockname.len;
1545 memcpy(req.name, res->lockname.name, res->lockname.len);
1546
1547 ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1548 &req, sizeof(req), nodenum, &status);
1549 /* XXX: negative status not handled properly here. */
1550 if (ret < 0)
1551 mlog_errno(ret);
1552 else {
1553 BUG_ON(status < 0);
1554 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1555 *real_master = (u8) (status & 0xff);
1556 mlog(0, "node %u responded to master requery with %u\n",
1557 nodenum, *real_master);
1558 ret = 0;
1559 }
1560 return ret;
1561}
1562
1563
1564/* this function cannot error, so unless the sending
1565 * or receiving of the message failed, the owner can
1566 * be trusted */
1567int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
1568{
1569 struct dlm_ctxt *dlm = data;
1570 struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1571 struct dlm_lock_resource *res = NULL;
Mark Fasheha3d33292006-03-09 17:55:56 -08001572 unsigned int hash;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001573 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1574 u32 flags = DLM_ASSERT_MASTER_REQUERY;
1575
1576 if (!dlm_grab(dlm)) {
1577 /* since the domain has gone away on this
1578 * node, the proper response is UNKNOWN */
1579 return master;
1580 }
1581
Mark Fasheha3d33292006-03-09 17:55:56 -08001582 hash = dlm_lockid_hash(req->name, req->namelen);
1583
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001584 spin_lock(&dlm->spinlock);
Mark Fasheha3d33292006-03-09 17:55:56 -08001585 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001586 if (res) {
1587 spin_lock(&res->spinlock);
1588 master = res->owner;
1589 if (master == dlm->node_num) {
1590 int ret = dlm_dispatch_assert_master(dlm, res,
1591 0, 0, flags);
1592 if (ret < 0) {
1593 mlog_errno(-ENOMEM);
1594 /* retry!? */
1595 BUG();
1596 }
1597 }
1598 spin_unlock(&res->spinlock);
1599 }
1600 spin_unlock(&dlm->spinlock);
1601
1602 dlm_put(dlm);
1603 return master;
1604}
1605
1606static inline struct list_head *
1607dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1608{
1609 struct list_head *ret;
1610 BUG_ON(list_num < 0);
1611 BUG_ON(list_num > 2);
1612 ret = &(res->granted);
1613 ret += list_num;
1614 return ret;
1615}
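
/*
 * The pointer arithmetic in dlm_list_num_to_pointer() only works because
 * the granted, converting and blocked list heads sit back to back in
 * struct dlm_lock_resource, in that order (list_num 0, 1, 2).  A purely
 * illustrative, hypothetical compile-time check of that layout assumption
 * (not present in the original file) could look like this:
 */
static inline void dlm_list_layout_check(void)
{
	/* these offsets are an assumption about dlmcommon.h; the check
	 * would fire at build time if the structure layout ever changed */
	BUILD_BUG_ON(offsetof(struct dlm_lock_resource, converting) !=
		     offsetof(struct dlm_lock_resource, granted) +
		     sizeof(struct list_head));
	BUILD_BUG_ON(offsetof(struct dlm_lock_resource, blocked) !=
		     offsetof(struct dlm_lock_resource, converting) +
		     sizeof(struct list_head));
}
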
1616/* TODO: do ast flush business
1617 * TODO: do MIGRATING and RECOVERING spinning
1618 */
1619
1620/*
1621* NOTE about in-flight requests during migration:
1622*
1623* Before attempting the migrate, the master has marked the lockres as
1624* MIGRATING and then flushed all of its pending ASTS. So any in-flight
1625* requests either got queued before the MIGRATING flag got set, in which
1626* case the lock data will reflect the change and a return message is on
1627* the way, or the request failed to get in before MIGRATING got set. In
1628* this case, the caller will be told to spin and wait for the MIGRATING
1629* flag to be dropped, then recheck the master.
1630* This holds true for the convert, cancel and unlock cases, and since lvb
1631* updates are tied to these same messages, it applies to lvb updates as
1632* well. For the lock case, there is no way a lock can be on the master
1633* queue and not be on the secondary queue since the lock is always added
1634* locally first. This means that the new target node will never be sent
1635* a lock that he doesn't already have on the list.
1636* In total, this means that the local lock is correct and should not be
1637* updated to match the one sent by the master. Any messages sent back
1638* from the master before the MIGRATING flag will bring the lock properly
1639* up-to-date, and the change will be ordered properly for the waiter.
1640* We will *not* attempt to modify the lock underneath the waiter.
1641*/
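
/*
 * Illustrative sketch only (not part of the original file): the
 * "spin and wait for MIGRATING to be dropped, then recheck the master"
 * behaviour that the note above asks of convert/cancel/unlock callers.
 * The helper name is invented; the real request paths implement their
 * own versions of this wait.
 */
static inline void example_wait_on_migrating(struct dlm_lock_resource *res)
{
	/* sleep until the flag is cleared and res->wq is woken; the caller
	 * must then look up the (possibly new) owner before retrying */
	wait_event(res->wq, !(res->state & DLM_LOCK_RES_MIGRATING));
}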
1642
1643static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1644 struct dlm_lock_resource *res,
1645 struct dlm_migratable_lockres *mres)
1646{
1647 struct dlm_migratable_lock *ml;
1648 struct list_head *queue;
1649 struct dlm_lock *newlock = NULL;
1650 struct dlm_lockstatus *lksb = NULL;
1651 int ret = 0;
Kurt Hackelc3187ce2006-04-27 18:05:41 -07001652 int i, bad;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001653 struct list_head *iter;
1654 struct dlm_lock *lock = NULL;
1655
1656 mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1657 for (i=0; i<mres->num_locks; i++) {
1658 ml = &(mres->ml[i]);
1659 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1660 newlock = NULL;
1661 lksb = NULL;
1662
1663 queue = dlm_list_num_to_pointer(res, ml->list);
1664
1665 /* if the lock is for the local node it needs to
1666 * be moved to the proper location within the queue.
1667 * do not allocate a new lock structure. */
1668 if (ml->node == dlm->node_num) {
1669 /* MIGRATION ONLY! */
1670 BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1671
1672 spin_lock(&res->spinlock);
1673 list_for_each(iter, queue) {
1674 lock = list_entry (iter, struct dlm_lock, list);
1675 if (lock->ml.cookie != ml->cookie)
1676 lock = NULL;
1677 else
1678 break;
1679 }
1680
1681 /* lock is always created locally first, and
1682 * destroyed locally last. it must be on the list */
1683 if (!lock) {
Kurt Hackel29004852006-03-02 16:43:36 -08001684 u64 c = ml->cookie;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001685 mlog(ML_ERROR, "could not find local lock "
Kurt Hackel29004852006-03-02 16:43:36 -08001686 "with cookie %u:%llu!\n",
1687 dlm_get_lock_cookie_node(c),
1688 dlm_get_lock_cookie_seq(c));
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001689 BUG();
1690 }
1691 BUG_ON(lock->ml.node != ml->node);
1692
1693 /* see NOTE above about why we do not update
1694 * to match the master here */
1695
1696 /* move the lock to its proper place */
1697 /* do not alter lock refcount. switching lists. */
Akinobu Mitaf1166292006-06-26 00:24:46 -07001698 list_move_tail(&lock->list, queue);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001699 spin_unlock(&res->spinlock);
1700
1701 mlog(0, "just reordered a local lock!\n");
1702 continue;
1703 }
1704
1705 /* lock is for another node. */
1706 newlock = dlm_new_lock(ml->type, ml->node,
1707 be64_to_cpu(ml->cookie), NULL);
1708 if (!newlock) {
1709 ret = -ENOMEM;
1710 goto leave;
1711 }
1712 lksb = newlock->lksb;
1713 dlm_lock_attach_lockres(newlock, res);
1714
1715 if (ml->convert_type != LKM_IVMODE) {
1716 BUG_ON(queue != &res->converting);
1717 newlock->ml.convert_type = ml->convert_type;
1718 }
1719 lksb->flags |= (ml->flags &
1720 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
Kurt Hackelccd8b1f2006-05-01 11:32:14 -07001721
1722 if (ml->type == LKM_NLMODE)
1723 goto skip_lvb;
1724
Kurt Hackel8bc674c2006-04-27 18:02:10 -07001725 if (!dlm_lvb_is_empty(mres->lvb)) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001726 if (lksb->flags & DLM_LKSB_PUT_LVB) {
1727 /* other node was trying to update
1728 * lvb when node died. recreate the
1729 * lksb with the updated lvb. */
1730 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
Kurt Hackelccd8b1f2006-05-01 11:32:14 -07001731 /* the lock resource lvb update must happen
1732 * NOW, before the spinlock is dropped.
1733 * we no longer wait for the AST to update
1734 * the lvb. */
1735 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001736 } else {
1737 /* otherwise, the node is sending its
1738 * most recent valid lvb info */
1739 BUG_ON(ml->type != LKM_EXMODE &&
1740 ml->type != LKM_PRMODE);
Kurt Hackel8bc674c2006-04-27 18:02:10 -07001741 if (!dlm_lvb_is_empty(res->lvb) &&
Kurt Hackelccd8b1f2006-05-01 11:32:14 -07001742 (ml->type == LKM_EXMODE ||
1743 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1744 int i;
1745 mlog(ML_ERROR, "%s:%.*s: received bad "
1746 "lvb! type=%d\n", dlm->name,
1747 res->lockname.len,
1748 res->lockname.name, ml->type);
1749 printk("lockres lvb=[");
1750 for (i=0; i<DLM_LVB_LEN; i++)
1751 printk("%02x", res->lvb[i]);
1752 printk("]\nmigrated lvb=[");
1753 for (i=0; i<DLM_LVB_LEN; i++)
1754 printk("%02x", mres->lvb[i]);
1755 printk("]\n");
1756 dlm_print_one_lock_resource(res);
1757 BUG();
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001758 }
1759 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1760 }
1761 }
Kurt Hackelccd8b1f2006-05-01 11:32:14 -07001762skip_lvb:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001763
1764 /* NOTE:
1765 * wrt lock queue ordering and recovery:
1766 * 1. order of locks on granted queue is
1767 * meaningless.
1768 * 2. order of locks on converting queue is
1769 * LOST with the node death. sorry charlie.
1770 * 3. order of locks on the blocked queue is
1771 * also LOST.
1772 * order of locks does not affect integrity, it
1773 * just means that a lock request may get pushed
1774 * back in line as a result of the node death.
1775 * also note that for a given node the lock order
1776 * for its secondary queue locks is preserved
1777 * relative to each other, but clearly *not*
1778 * preserved relative to locks from other nodes.
1779 */
Kurt Hackelc3187ce2006-04-27 18:05:41 -07001780 bad = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001781 spin_lock(&res->spinlock);
Kurt Hackelc3187ce2006-04-27 18:05:41 -07001782 list_for_each_entry(lock, queue, list) {
1783 if (lock->ml.cookie == ml->cookie) {
1784 u64 c = lock->ml.cookie;
1785 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1786 "exists on this lockres!\n", dlm->name,
1787 res->lockname.len, res->lockname.name,
1788 dlm_get_lock_cookie_node(c),
1789 dlm_get_lock_cookie_seq(c));
1790
1791 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1792 "node=%u, cookie=%u:%llu, queue=%d\n",
1793 ml->type, ml->convert_type, ml->node,
1794 dlm_get_lock_cookie_node(ml->cookie),
1795 dlm_get_lock_cookie_seq(ml->cookie),
1796 ml->list);
1797
1798 __dlm_print_one_lock_resource(res);
1799 bad = 1;
1800 break;
1801 }
1802 }
1803 if (!bad) {
1804 dlm_lock_get(newlock);
1805 list_add_tail(&newlock->list, queue);
1806 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001807 spin_unlock(&res->spinlock);
1808 }
1809 mlog(0, "done running all the locks\n");
1810
1811leave:
1812 if (ret < 0) {
1813 mlog_errno(ret);
1814 if (newlock)
1815 dlm_lock_put(newlock);
1816 }
1817
1818 mlog_exit(ret);
1819 return ret;
1820}
1821
1822void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
1823 struct dlm_lock_resource *res)
1824{
1825 int i;
1826 struct list_head *queue, *iter, *iter2;
1827 struct dlm_lock *lock;
1828
1829 res->state |= DLM_LOCK_RES_RECOVERING;
Kurt Hackel69d72b02006-05-01 10:57:51 -07001830 if (!list_empty(&res->recovering)) {
1831 mlog(0,
1832	    "Recovering res %s:%.*s, already on the recovery list!\n",
1833 dlm->name, res->lockname.len, res->lockname.name);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001834 list_del_init(&res->recovering);
Kurt Hackel69d72b02006-05-01 10:57:51 -07001835 }
1836 /* We need to hold a reference while on the recovery list */
1837 dlm_lockres_get(res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001838 list_add_tail(&res->recovering, &dlm->reco.resources);
1839
1840 /* find any pending locks and put them back on proper list */
1841 for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
1842 queue = dlm_list_idx_to_ptr(res, i);
1843 list_for_each_safe(iter, iter2, queue) {
1844 lock = list_entry (iter, struct dlm_lock, list);
1845 dlm_lock_get(lock);
1846 if (lock->convert_pending) {
1847 /* move converting lock back to granted */
1848 BUG_ON(i != DLM_CONVERTING_LIST);
1849 mlog(0, "node died with convert pending "
1850 "on %.*s. move back to granted list.\n",
1851 res->lockname.len, res->lockname.name);
1852 dlm_revert_pending_convert(res, lock);
1853 lock->convert_pending = 0;
1854 } else if (lock->lock_pending) {
1855 /* remove pending lock requests completely */
1856 BUG_ON(i != DLM_BLOCKED_LIST);
1857 mlog(0, "node died with lock pending "
1858 "on %.*s. remove from blocked list and skip.\n",
1859 res->lockname.len, res->lockname.name);
1860 /* lock will be floating until ref in
1861 * dlmlock_remote is freed after the network
1862 * call returns. ok for it to not be on any
1863 * list since no ast can be called
1864 * (the master is dead). */
1865 dlm_revert_pending_lock(res, lock);
1866 lock->lock_pending = 0;
1867 } else if (lock->unlock_pending) {
1868 /* if an unlock was in progress, treat as
1869 * if this had completed successfully
1870 * before sending this lock state to the
1871 * new master. note that the dlm_unlock
1872 * call is still responsible for calling
1873 * the unlockast. that will happen after
1874 * the network call times out. for now,
1875 * just move lists to prepare the new
1876 * recovery master. */
1877 BUG_ON(i != DLM_GRANTED_LIST);
1878 mlog(0, "node died with unlock pending "
1879				     "on %.*s. remove from granted list and skip.\n",
1880 res->lockname.len, res->lockname.name);
1881 dlm_commit_pending_unlock(res, lock);
1882 lock->unlock_pending = 0;
1883 } else if (lock->cancel_pending) {
1884 /* if a cancel was in progress, treat as
1885 * if this had completed successfully
1886 * before sending this lock state to the
1887 * new master */
1888 BUG_ON(i != DLM_CONVERTING_LIST);
1889 mlog(0, "node died with cancel pending "
1890 "on %.*s. move back to granted list.\n",
1891 res->lockname.len, res->lockname.name);
1892 dlm_commit_pending_cancel(res, lock);
1893 lock->cancel_pending = 0;
1894 }
1895 dlm_lock_put(lock);
1896 }
1897 }
1898}
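
/*
 * Quick reference for the pending-state handling above (added commentary,
 * not part of the original file):
 *
 *   convert_pending (converting queue) -> revert convert; back to granted
 *   lock_pending    (blocked queue)    -> revert lock; dropped from the queues
 *   unlock_pending  (granted queue)    -> commit unlock; lock removed
 *   cancel_pending  (converting queue) -> commit cancel; back to granted
 *
 * In every case the in-progress operation is resolved locally before the
 * lock state is shipped to the new recovery master.
 */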
1899
1900
1901
1902/* removes all recovered locks from the recovery list.
1903 * sets the res->owner to the new master.
1904 * unsets the RECOVERY flag and wakes waiters. */
1905static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
1906 u8 dead_node, u8 new_master)
1907{
1908 int i;
Mark Fasheh81f20942006-02-28 17:31:22 -08001909 struct list_head *iter, *iter2;
1910 struct hlist_node *hash_iter;
1911 struct hlist_head *bucket;
1912
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001913 struct dlm_lock_resource *res;
1914
1915 mlog_entry_void();
1916
1917 assert_spin_locked(&dlm->spinlock);
1918
1919 list_for_each_safe(iter, iter2, &dlm->reco.resources) {
1920 res = list_entry (iter, struct dlm_lock_resource, recovering);
1921 if (res->owner == dead_node) {
1922 list_del_init(&res->recovering);
1923 spin_lock(&res->spinlock);
1924 dlm_change_lockres_owner(dlm, res, new_master);
1925 res->state &= ~DLM_LOCK_RES_RECOVERING;
Kurt Hackel69d72b02006-05-01 10:57:51 -07001926 if (!__dlm_lockres_unused(res))
1927 __dlm_dirty_lockres(dlm, res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001928 spin_unlock(&res->spinlock);
1929 wake_up(&res->wq);
Kurt Hackel69d72b02006-05-01 10:57:51 -07001930 dlm_lockres_put(res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001931 }
1932 }
1933
1934 /* this will become unnecessary eventually, but
1935 * for now we need to run the whole hash, clear
1936 * the RECOVERING state and set the owner
1937 * if necessary */
Mark Fasheh81f20942006-02-28 17:31:22 -08001938 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
Daniel Phillips03d864c2006-03-10 18:08:16 -08001939 bucket = dlm_lockres_hash(dlm, i);
Mark Fasheh81f20942006-02-28 17:31:22 -08001940 hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001941 if (res->state & DLM_LOCK_RES_RECOVERING) {
1942 if (res->owner == dead_node) {
1943 mlog(0, "(this=%u) res %.*s owner=%u "
1944 "was not on recovering list, but "
1945 "clearing state anyway\n",
1946 dlm->node_num, res->lockname.len,
1947 res->lockname.name, new_master);
1948 } else if (res->owner == dlm->node_num) {
1949 mlog(0, "(this=%u) res %.*s owner=%u "
1950 "was not on recovering list, "
1951 "owner is THIS node, clearing\n",
1952 dlm->node_num, res->lockname.len,
1953 res->lockname.name, new_master);
1954 } else
1955 continue;
1956
Kurt Hackelc03872f2006-03-06 14:08:49 -08001957 if (!list_empty(&res->recovering)) {
1958 mlog(0, "%s:%.*s: lockres was "
1959 "marked RECOVERING, owner=%u\n",
1960 dlm->name, res->lockname.len,
1961 res->lockname.name, res->owner);
1962 list_del_init(&res->recovering);
Kurt Hackel69d72b02006-05-01 10:57:51 -07001963 dlm_lockres_put(res);
Kurt Hackelc03872f2006-03-06 14:08:49 -08001964 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001965 spin_lock(&res->spinlock);
1966 dlm_change_lockres_owner(dlm, res, new_master);
1967 res->state &= ~DLM_LOCK_RES_RECOVERING;
Kurt Hackel69d72b02006-05-01 10:57:51 -07001968 if (!__dlm_lockres_unused(res))
1969 __dlm_dirty_lockres(dlm, res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001970 spin_unlock(&res->spinlock);
1971 wake_up(&res->wq);
1972 }
1973 }
1974 }
1975}
1976
1977static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
1978{
1979 if (local) {
1980 if (lock->ml.type != LKM_EXMODE &&
1981 lock->ml.type != LKM_PRMODE)
1982 return 1;
1983 } else if (lock->ml.type == LKM_EXMODE)
1984 return 1;
1985 return 0;
1986}
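
/*
 * Added commentary (not part of the original file): dlm_lvb_needs_invalidation()
 * answers "can this lvb still be trusted?" from two viewpoints.  For a
 * secondary lockres (local != 0) the lvb is only valid while this node holds
 * PR or EX, so a lock in any other mode means invalidate.  For a lockres
 * this node masters (local == 0) only an EX held by the dead node could
 * have been rewriting the lvb, so only that case forces the wipe.
 */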
1987
1988static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
1989 struct dlm_lock_resource *res, u8 dead_node)
1990{
1991 struct list_head *iter, *queue;
1992 struct dlm_lock *lock;
1993 int blank_lvb = 0, local = 0;
1994 int i;
1995 u8 search_node;
1996
1997 assert_spin_locked(&dlm->spinlock);
1998 assert_spin_locked(&res->spinlock);
1999
2000 if (res->owner == dlm->node_num)
2001 /* if this node owned the lockres, and if the dead node
2002 * had an EX when he died, blank out the lvb */
2003 search_node = dead_node;
2004 else {
2005 /* if this is a secondary lockres, and we had no EX or PR
2006 * locks granted, we can no longer trust the lvb */
2007 search_node = dlm->node_num;
2008 local = 1; /* check local state for valid lvb */
2009 }
2010
2011 for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2012 queue = dlm_list_idx_to_ptr(res, i);
2013 list_for_each(iter, queue) {
2014 lock = list_entry (iter, struct dlm_lock, list);
2015 if (lock->ml.node == search_node) {
2016 if (dlm_lvb_needs_invalidation(lock, local)) {
2017 /* zero the lksb lvb and lockres lvb */
2018 blank_lvb = 1;
2019 memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2020 }
2021 }
2022 }
2023 }
2024
2025 if (blank_lvb) {
2026 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2027 res->lockname.len, res->lockname.name, dead_node);
2028 memset(res->lvb, 0, DLM_LVB_LEN);
2029 }
2030}
2031
2032static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2033 struct dlm_lock_resource *res, u8 dead_node)
2034{
2035 struct list_head *iter, *tmpiter;
2036 struct dlm_lock *lock;
2037
2038 /* this node is the lockres master:
2039 * 1) remove any stale locks for the dead node
2040 * 2) if the dead node had an EX when he died, blank out the lvb
2041 */
2042 assert_spin_locked(&dlm->spinlock);
2043 assert_spin_locked(&res->spinlock);
2044
2045 /* TODO: check pending_asts, pending_basts here */
2046 list_for_each_safe(iter, tmpiter, &res->granted) {
2047 lock = list_entry (iter, struct dlm_lock, list);
2048 if (lock->ml.node == dead_node) {
2049 list_del_init(&lock->list);
2050 dlm_lock_put(lock);
2051 }
2052 }
2053 list_for_each_safe(iter, tmpiter, &res->converting) {
2054 lock = list_entry (iter, struct dlm_lock, list);
2055 if (lock->ml.node == dead_node) {
2056 list_del_init(&lock->list);
2057 dlm_lock_put(lock);
2058 }
2059 }
2060 list_for_each_safe(iter, tmpiter, &res->blocked) {
2061 lock = list_entry (iter, struct dlm_lock, list);
2062 if (lock->ml.node == dead_node) {
2063 list_del_init(&lock->list);
2064 dlm_lock_put(lock);
2065 }
2066 }
2067
2068 /* do not kick thread yet */
2069 __dlm_dirty_lockres(dlm, res);
2070}
2071
2072/* if this node is the recovery master, and there are no
2073 * locks for a given lockres owned by this node that are in
2074 * either PR or EX mode, zero out the lvb before requesting.
2075 *
2076 */
2077
2078
2079static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2080{
Mark Fasheh81f20942006-02-28 17:31:22 -08002081 struct hlist_node *iter;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002082 struct dlm_lock_resource *res;
2083 int i;
Mark Fasheh81f20942006-02-28 17:31:22 -08002084 struct hlist_head *bucket;
Kurt Hackele2faea42006-01-12 14:24:55 -08002085 struct dlm_lock *lock;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002086
2087
2088 /* purge any stale mles */
2089 dlm_clean_master_list(dlm, dead_node);
2090
2091 /*
2092 * now clean up all lock resources. there are two rules:
2093 *
2094 * 1) if the dead node was the master, move the lockres
2095 * to the recovering list. set the RECOVERING flag.
2096 * this lockres needs to be cleaned up before it can
2097 * be used further.
2098 *
2099 * 2) if this node was the master, remove all locks from
2100 * each of the lockres queues that were owned by the
2101 * dead node. once recovery finishes, the dlm thread
2102 * can be kicked again to see if any ASTs or BASTs
2103 * need to be fired as a result.
2104 */
Mark Fasheh81f20942006-02-28 17:31:22 -08002105 for (i = 0; i < DLM_HASH_BUCKETS; i++) {
Daniel Phillips03d864c2006-03-10 18:08:16 -08002106 bucket = dlm_lockres_hash(dlm, i);
Mark Fasheh81f20942006-02-28 17:31:22 -08002107 hlist_for_each_entry(res, iter, bucket, hash_node) {
Kurt Hackele2faea42006-01-12 14:24:55 -08002108 /* always prune any $RECOVERY entries for dead nodes,
2109 * otherwise hangs can occur during later recovery */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002110 if (dlm_is_recovery_lock(res->lockname.name,
Kurt Hackele2faea42006-01-12 14:24:55 -08002111 res->lockname.len)) {
2112 spin_lock(&res->spinlock);
2113 list_for_each_entry(lock, &res->granted, list) {
2114 if (lock->ml.node == dead_node) {
2115 mlog(0, "AHA! there was "
2116 "a $RECOVERY lock for dead "
2117 "node %u (%s)!\n",
2118 dead_node, dlm->name);
2119 list_del_init(&lock->list);
2120 dlm_lock_put(lock);
2121 break;
2122 }
2123 }
2124 spin_unlock(&res->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002125 continue;
Kurt Hackele2faea42006-01-12 14:24:55 -08002126 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002127 spin_lock(&res->spinlock);
2128 /* zero the lvb if necessary */
2129 dlm_revalidate_lvb(dlm, res, dead_node);
2130 if (res->owner == dead_node)
2131 dlm_move_lockres_to_recovery_list(dlm, res);
2132 else if (res->owner == dlm->node_num) {
2133 dlm_free_dead_locks(dlm, res, dead_node);
2134 __dlm_lockres_calc_usage(dlm, res);
2135 }
2136 spin_unlock(&res->spinlock);
2137 }
2138 }
2139
2140}
2141
2142static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2143{
2144 assert_spin_locked(&dlm->spinlock);
2145
Kurt Hackel466d1a42006-05-01 11:11:13 -07002146 if (dlm->reco.new_master == idx) {
2147 mlog(0, "%s: recovery master %d just died\n",
2148 dlm->name, idx);
2149 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2150 /* finalize1 was reached, so it is safe to clear
2151 * the new_master and dead_node. that recovery
2152 * is complete. */
2153 mlog(0, "%s: dead master %d had reached "
2154 "finalize1 state, clearing\n", dlm->name, idx);
2155 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2156 __dlm_reset_recovery(dlm);
2157 }
2158 }
2159
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002160 /* check to see if the node is already considered dead */
2161 if (!test_bit(idx, dlm->live_nodes_map)) {
2162 mlog(0, "for domain %s, node %d is already dead. "
2163 "another node likely did recovery already.\n",
2164 dlm->name, idx);
2165 return;
2166 }
2167
2168 /* check to see if we do not care about this node */
2169 if (!test_bit(idx, dlm->domain_map)) {
2170 /* This also catches the case that we get a node down
2171 * but haven't joined the domain yet. */
2172 mlog(0, "node %u already removed from domain!\n", idx);
2173 return;
2174 }
2175
2176 clear_bit(idx, dlm->live_nodes_map);
2177
2178 /* Clean up join state on node death. */
2179 if (dlm->joining_node == idx) {
2180 mlog(0, "Clearing join state for node %u\n", idx);
2181 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2182 }
2183
2184 /* make sure local cleanup occurs before the heartbeat events */
2185 if (!test_bit(idx, dlm->recovery_map))
2186 dlm_do_local_recovery_cleanup(dlm, idx);
2187
2188 /* notify anything attached to the heartbeat events */
2189 dlm_hb_event_notify_attached(dlm, idx, 0);
2190
2191 mlog(0, "node %u being removed from domain map!\n", idx);
2192 clear_bit(idx, dlm->domain_map);
2193 /* wake up migration waiters if a node goes down.
2194 * perhaps later we can genericize this for other waiters. */
2195 wake_up(&dlm->migration_wq);
2196
2197 if (test_bit(idx, dlm->recovery_map))
2198 mlog(0, "domain %s, node %u already added "
2199 "to recovery map!\n", dlm->name, idx);
2200 else
2201 set_bit(idx, dlm->recovery_map);
2202}
2203
2204void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2205{
2206 struct dlm_ctxt *dlm = data;
2207
2208 if (!dlm_grab(dlm))
2209 return;
2210
2211 spin_lock(&dlm->spinlock);
2212 __dlm_hb_node_down(dlm, idx);
2213 spin_unlock(&dlm->spinlock);
2214
2215 dlm_put(dlm);
2216}
2217
2218void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2219{
2220 struct dlm_ctxt *dlm = data;
2221
2222 if (!dlm_grab(dlm))
2223 return;
2224
2225 spin_lock(&dlm->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002226 set_bit(idx, dlm->live_nodes_map);
Kurt Hackele2faea42006-01-12 14:24:55 -08002227 /* do NOT notify mle attached to the heartbeat events.
2228	 * new nodes are not interested in mastery until joined. */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002229 spin_unlock(&dlm->spinlock);
2230
2231 dlm_put(dlm);
2232}
2233
2234static void dlm_reco_ast(void *astdata)
2235{
2236 struct dlm_ctxt *dlm = astdata;
2237 mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2238 dlm->node_num, dlm->name);
2239}
2240static void dlm_reco_bast(void *astdata, int blocked_type)
2241{
2242 struct dlm_ctxt *dlm = astdata;
2243 mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2244 dlm->node_num, dlm->name);
2245}
2246static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2247{
2248 mlog(0, "unlockast for recovery lock fired!\n");
2249}
2250
Kurt Hackele2faea42006-01-12 14:24:55 -08002251/*
2252 * dlm_pick_recovery_master will continually attempt to use
2253 * dlmlock() on the special "$RECOVERY" lockres with the
2254 * LKM_NOQUEUE flag to get an EX. every thread that enters
2255 * this function on each node racing to become the recovery
2256 * master will not stop attempting this until either:
2257 * a) this node gets the EX (and becomes the recovery master),
2258 * or b) dlm->reco.new_master gets set to some nodenum
2259 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2260 * so each time a recovery master is needed, the entire cluster
2261 * will sync at this point. if the new master dies, that will
2262 * be detected in dlm_do_recovery */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002263static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2264{
2265 enum dlm_status ret;
2266 struct dlm_lockstatus lksb;
2267 int status = -EINVAL;
2268
2269 mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2270 dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
Kurt Hackele2faea42006-01-12 14:24:55 -08002271again:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002272 memset(&lksb, 0, sizeof(lksb));
2273
2274 ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2275 DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);
2276
Kurt Hackele2faea42006-01-12 14:24:55 -08002277 mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2278 dlm->name, ret, lksb.status);
2279
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002280 if (ret == DLM_NORMAL) {
2281 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2282 dlm->name, dlm->node_num);
Kurt Hackele2faea42006-01-12 14:24:55 -08002283
2284 /* got the EX lock. check to see if another node
2285 * just became the reco master */
2286 if (dlm_reco_master_ready(dlm)) {
2287 mlog(0, "%s: got reco EX lock, but %u will "
2288 "do the recovery\n", dlm->name,
2289 dlm->reco.new_master);
2290 status = -EEXIST;
2291 } else {
Kurt Hackel898effa2006-01-18 17:01:25 -08002292 status = 0;
2293
2294 /* see if recovery was already finished elsewhere */
2295 spin_lock(&dlm->spinlock);
2296 if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2297 status = -EINVAL;
2298 mlog(0, "%s: got reco EX lock, but "
2299 "node got recovered already\n", dlm->name);
2300 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2301 mlog(ML_ERROR, "%s: new master is %u "
2302 "but no dead node!\n",
2303 dlm->name, dlm->reco.new_master);
2304 BUG();
2305 }
2306 }
2307 spin_unlock(&dlm->spinlock);
2308 }
2309
2310 /* if this node has actually become the recovery master,
2311 * set the master and send the messages to begin recovery */
2312 if (!status) {
2313 mlog(0, "%s: dead=%u, this=%u, sending "
2314 "begin_reco now\n", dlm->name,
2315 dlm->reco.dead_node, dlm->node_num);
Kurt Hackele2faea42006-01-12 14:24:55 -08002316 status = dlm_send_begin_reco_message(dlm,
2317 dlm->reco.dead_node);
2318 /* this always succeeds */
2319 BUG_ON(status);
2320
2321 /* set the new_master to this node */
2322 spin_lock(&dlm->spinlock);
Kurt Hackelab27eb62006-04-27 18:03:49 -07002323 dlm_set_reco_master(dlm, dlm->node_num);
Kurt Hackele2faea42006-01-12 14:24:55 -08002324 spin_unlock(&dlm->spinlock);
2325 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002326
2327 /* recovery lock is a special case. ast will not get fired,
2328 * so just go ahead and unlock it. */
2329 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
Kurt Hackele2faea42006-01-12 14:24:55 -08002330 if (ret == DLM_DENIED) {
2331 mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2332 ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2333 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002334 if (ret != DLM_NORMAL) {
2335 /* this would really suck. this could only happen
2336 * if there was a network error during the unlock
2337 * because of node death. this means the unlock
2338 * is actually "done" and the lock structure is
2339 * even freed. we can continue, but only
2340 * because this specific lock name is special. */
Kurt Hackele2faea42006-01-12 14:24:55 -08002341 mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002342 }
2343 } else if (ret == DLM_NOTQUEUED) {
2344 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2345 dlm->name, dlm->node_num);
2346 /* another node is master. wait on
Kurt Hackele2faea42006-01-12 14:24:55 -08002347 * reco.new_master != O2NM_INVALID_NODE_NUM
2348 * for at most one second */
2349 wait_event_timeout(dlm->dlm_reco_thread_wq,
2350 dlm_reco_master_ready(dlm),
2351 msecs_to_jiffies(1000));
2352 if (!dlm_reco_master_ready(dlm)) {
2353			mlog(0, "%s: reco master taking a while\n",
2354 dlm->name);
2355 goto again;
2356 }
2357 /* another node has informed this one that it is reco master */
2358 mlog(0, "%s: reco master %u is ready to recover %u\n",
2359 dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002360 status = -EEXIST;
Kurt Hackelc8df4122006-05-01 13:47:50 -07002361 } else if (ret == DLM_RECOVERING) {
2362 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2363 dlm->name, dlm->node_num);
2364 goto again;
Kurt Hackele2faea42006-01-12 14:24:55 -08002365 } else {
2366 struct dlm_lock_resource *res;
2367
2368 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2369 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2370 "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2371 dlm_errname(lksb.status));
2372 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2373 DLM_RECOVERY_LOCK_NAME_LEN);
2374 if (res) {
2375 dlm_print_one_lock_resource(res);
2376 dlm_lockres_put(res);
2377 } else {
2378 mlog(ML_ERROR, "recovery lock not found\n");
2379 }
2380 BUG();
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002381 }
2382
2383 return status;
2384}
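
/*
 * Added commentary (not part of the original file): the caller sees one of
 * three outcomes from dlm_pick_recovery_master().  0 means this node won
 * the $RECOVERY EX lock and has already broadcast begin_reco; -EEXIST
 * means another node is (or just became) the recovery master; -EINVAL
 * means the EX was won but the dead node had already been recovered
 * elsewhere.  An unexpected dlmlock() result is treated as fatal via
 * BUG(), while a failed unlock of the special lock is only logged.
 */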
2385
2386static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2387{
2388 struct dlm_begin_reco br;
2389 int ret = 0;
2390 struct dlm_node_iter iter;
2391 int nodenum;
2392 int status;
2393
2394 mlog_entry("%u\n", dead_node);
2395
Kurt Hackeld6dea6e2006-04-27 18:08:51 -07002396 mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002397
2398 spin_lock(&dlm->spinlock);
2399 dlm_node_iter_init(dlm->domain_map, &iter);
2400 spin_unlock(&dlm->spinlock);
2401
2402 clear_bit(dead_node, iter.node_map);
2403
2404 memset(&br, 0, sizeof(br));
2405 br.node_idx = dlm->node_num;
2406 br.dead_node = dead_node;
2407
2408 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2409 ret = 0;
2410 if (nodenum == dead_node) {
2411 mlog(0, "not sending begin reco to dead node "
2412 "%u\n", dead_node);
2413 continue;
2414 }
2415 if (nodenum == dlm->node_num) {
2416 mlog(0, "not sending begin reco to self\n");
2417 continue;
2418 }
Kurt Hackele2faea42006-01-12 14:24:55 -08002419retry:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002420 ret = -EINVAL;
2421 mlog(0, "attempting to send begin reco msg to %d\n",
2422 nodenum);
2423 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2424 &br, sizeof(br), nodenum, &status);
2425 /* negative status is handled ok by caller here */
2426 if (ret >= 0)
2427 ret = status;
Kurt Hackele2faea42006-01-12 14:24:55 -08002428 if (dlm_is_host_down(ret)) {
2429 /* node is down. not involved in recovery
2430 * so just keep going */
2431 mlog(0, "%s: node %u was down when sending "
2432 "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2433 ret = 0;
2434 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002435 if (ret < 0) {
2436 struct dlm_lock_resource *res;
Kurt Hackele2faea42006-01-12 14:24:55 -08002437 /* this is now a serious problem, possibly ENOMEM
2438 * in the network stack. must retry */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002439 mlog_errno(ret);
2440 mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2441 " returned %d\n", dlm->name, nodenum, ret);
2442 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2443 DLM_RECOVERY_LOCK_NAME_LEN);
2444 if (res) {
2445 dlm_print_one_lock_resource(res);
2446 dlm_lockres_put(res);
2447 } else {
2448 mlog(ML_ERROR, "recovery lock not found\n");
2449 }
Kurt Hackele2faea42006-01-12 14:24:55 -08002450 /* sleep for a bit in hopes that we can avoid
2451 * another ENOMEM */
2452 msleep(100);
2453 goto retry;
Kurt Hackel466d1a42006-05-01 11:11:13 -07002454 } else if (ret == EAGAIN) {
2455 mlog(0, "%s: trying to start recovery of node "
2456 "%u, but node %u is waiting for last recovery "
2457 "to complete, backoff for a bit\n", dlm->name,
2458 dead_node, nodenum);
2459 /* TODO Look into replacing msleep with cond_resched() */
2460 msleep(100);
2461 goto retry;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002462 }
2463 }
2464
2465 return ret;
2466}
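
/*
 * Added commentary (not part of the original file): error handling in the
 * begin_reco broadcast above is deliberately persistent.  A node that is
 * already down is simply skipped, a transient send failure (e.g. ENOMEM
 * in the network stack) is retried against the same node after a short
 * sleep, and a positive EAGAIN from a node still finishing the previous
 * recovery (finalize not yet complete) also triggers a backoff-and-retry.
 * In effect the broadcast only completes once every live node has
 * accepted the begin_reco message.
 */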
2467
2468int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2469{
2470 struct dlm_ctxt *dlm = data;
2471 struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2472
2473 /* ok to return 0, domain has gone away */
2474 if (!dlm_grab(dlm))
2475 return 0;
2476
Kurt Hackel466d1a42006-05-01 11:11:13 -07002477 spin_lock(&dlm->spinlock);
2478 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2479 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2480 "but this node is in finalize state, waiting on finalize2\n",
2481 dlm->name, br->node_idx, br->dead_node,
2482 dlm->reco.dead_node, dlm->reco.new_master);
2483 spin_unlock(&dlm->spinlock);
2484 return EAGAIN;
2485 }
2486 spin_unlock(&dlm->spinlock);
2487
Kurt Hackeld6dea6e2006-04-27 18:08:51 -07002488 mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2489 dlm->name, br->node_idx, br->dead_node,
2490 dlm->reco.dead_node, dlm->reco.new_master);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002491
2492 dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2493
2494 spin_lock(&dlm->spinlock);
2495 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
Kurt Hackele2faea42006-01-12 14:24:55 -08002496 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2497 mlog(0, "%s: new_master %u died, changing "
2498 "to %u\n", dlm->name, dlm->reco.new_master,
2499 br->node_idx);
2500 } else {
2501 mlog(0, "%s: new_master %u NOT DEAD, changing "
2502 "to %u\n", dlm->name, dlm->reco.new_master,
2503 br->node_idx);
2504 /* may not have seen the new master as dead yet */
2505 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002506 }
2507 if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
Kurt Hackele2faea42006-01-12 14:24:55 -08002508 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2509 "node %u changing it to %u\n", dlm->name,
2510 dlm->reco.dead_node, br->node_idx, br->dead_node);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002511 }
Kurt Hackelab27eb62006-04-27 18:03:49 -07002512 dlm_set_reco_master(dlm, br->node_idx);
2513 dlm_set_reco_dead_node(dlm, br->dead_node);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002514 if (!test_bit(br->dead_node, dlm->recovery_map)) {
Kurt Hackele2faea42006-01-12 14:24:55 -08002515 mlog(0, "recovery master %u sees %u as dead, but this "
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002516 "node has not yet. marking %u as dead\n",
2517 br->node_idx, br->dead_node, br->dead_node);
Kurt Hackele2faea42006-01-12 14:24:55 -08002518 if (!test_bit(br->dead_node, dlm->domain_map) ||
2519 !test_bit(br->dead_node, dlm->live_nodes_map))
2520 mlog(0, "%u not in domain/live_nodes map "
2521 "so setting it in reco map manually\n",
2522 br->dead_node);
Kurt Hackelc03872f2006-03-06 14:08:49 -08002523 /* force the recovery cleanup in __dlm_hb_node_down
2524 * both of these will be cleared in a moment */
2525 set_bit(br->dead_node, dlm->domain_map);
2526 set_bit(br->dead_node, dlm->live_nodes_map);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002527 __dlm_hb_node_down(dlm, br->dead_node);
2528 }
2529 spin_unlock(&dlm->spinlock);
2530
2531 dlm_kick_recovery_thread(dlm);
Kurt Hackeld6dea6e2006-04-27 18:08:51 -07002532
2533 mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2534 dlm->name, br->node_idx, br->dead_node,
2535 dlm->reco.dead_node, dlm->reco.new_master);
2536
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002537 dlm_put(dlm);
2538 return 0;
2539}
2540
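/*
 * Added commentary (not part of the original file): recovery is finished
 * with a two-stage broadcast.  Stage 1 makes every node hand the recovered
 * lockres ownership to the new master and set DLM_RECO_STATE_FINALIZE;
 * stage 2 clears that flag and resets the local recovery state.  While a
 * node sits between the two stages it answers begin_reco requests with
 * EAGAIN (see dlm_begin_reco_handler) and, if the recovery master dies in
 * that window, __dlm_hb_node_down knows it is safe to clear new_master
 * and dead_node because stage 1 had already been reached.
 */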
Kurt Hackel466d1a42006-05-01 11:11:13 -07002541#define DLM_FINALIZE_STAGE2 0x01
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002542static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2543{
2544 int ret = 0;
2545 struct dlm_finalize_reco fr;
2546 struct dlm_node_iter iter;
2547 int nodenum;
2548 int status;
Kurt Hackel466d1a42006-05-01 11:11:13 -07002549 int stage = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002550
Kurt Hackel466d1a42006-05-01 11:11:13 -07002551 mlog(0, "finishing recovery for node %s:%u, "
2552 "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002553
2554 spin_lock(&dlm->spinlock);
2555 dlm_node_iter_init(dlm->domain_map, &iter);
2556 spin_unlock(&dlm->spinlock);
2557
Kurt Hackel466d1a42006-05-01 11:11:13 -07002558stage2:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002559 memset(&fr, 0, sizeof(fr));
2560 fr.node_idx = dlm->node_num;
2561 fr.dead_node = dlm->reco.dead_node;
Kurt Hackel466d1a42006-05-01 11:11:13 -07002562 if (stage == 2)
2563 fr.flags |= DLM_FINALIZE_STAGE2;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002564
2565 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2566 if (nodenum == dlm->node_num)
2567 continue;
2568 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2569 &fr, sizeof(fr), nodenum, &status);
Kurt Hackel466d1a42006-05-01 11:11:13 -07002570 if (ret >= 0)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002571 ret = status;
Kurt Hackel466d1a42006-05-01 11:11:13 -07002572 if (ret < 0) {
2573 mlog_errno(ret);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002574 if (dlm_is_host_down(ret)) {
2575 /* this has no effect on this recovery
2576 * session, so set the status to zero to
2577 * finish out the last recovery */
2578 mlog(ML_ERROR, "node %u went down after this "
2579 "node finished recovery.\n", nodenum);
2580 ret = 0;
Kurt Hackelc27069e2006-05-01 13:51:49 -07002581 continue;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002582 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002583 break;
2584 }
2585 }
Kurt Hackel466d1a42006-05-01 11:11:13 -07002586 if (stage == 1) {
2587 /* reset the node_iter back to the top and send finalize2 */
2588 iter.curnode = -1;
2589 stage = 2;
2590 goto stage2;
2591 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002592
2593 return ret;
2594}
2595
2596int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
2597{
2598 struct dlm_ctxt *dlm = data;
2599 struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
Kurt Hackel466d1a42006-05-01 11:11:13 -07002600 int stage = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002601
2602 /* ok to return 0, domain has gone away */
2603 if (!dlm_grab(dlm))
2604 return 0;
2605
Kurt Hackel466d1a42006-05-01 11:11:13 -07002606 if (fr->flags & DLM_FINALIZE_STAGE2)
2607 stage = 2;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002608
Kurt Hackel466d1a42006-05-01 11:11:13 -07002609 mlog(0, "%s: node %u finalizing recovery stage%d of "
2610 "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2611 fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2612
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002613 spin_lock(&dlm->spinlock);
2614
2615 if (dlm->reco.new_master != fr->node_idx) {
2616 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2617 "%u is supposed to be the new master, dead=%u\n",
2618 fr->node_idx, dlm->reco.new_master, fr->dead_node);
2619 BUG();
2620 }
2621 if (dlm->reco.dead_node != fr->dead_node) {
2622 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2623 "node %u, but node %u is supposed to be dead\n",
2624 fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2625 BUG();
2626 }
2627
Kurt Hackel466d1a42006-05-01 11:11:13 -07002628 switch (stage) {
2629 case 1:
2630 dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2631 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2632 mlog(ML_ERROR, "%s: received finalize1 from "
2633 "new master %u for dead node %u, but "
2634 "this node has already received it!\n",
2635 dlm->name, fr->node_idx, fr->dead_node);
2636 dlm_print_reco_node_status(dlm);
2637 BUG();
2638 }
2639 dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2640 spin_unlock(&dlm->spinlock);
2641 break;
2642 case 2:
2643 if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2644 mlog(ML_ERROR, "%s: received finalize2 from "
2645 "new master %u for dead node %u, but "
2646 "this node did not have finalize1!\n",
2647 dlm->name, fr->node_idx, fr->dead_node);
2648 dlm_print_reco_node_status(dlm);
2649 BUG();
2650 }
2651 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2652 spin_unlock(&dlm->spinlock);
2653 dlm_reset_recovery(dlm);
2654 dlm_kick_recovery_thread(dlm);
2655 break;
2656 default:
2657 BUG();
2658 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002659
Kurt Hackeld6dea6e2006-04-27 18:08:51 -07002660 mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2661 dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2662
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002663 dlm_put(dlm);
2664 return 0;
2665}