/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;

static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
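
/* Note: cookie 0 is effectively reserved.  dlm_send_one_lockres() below
 * leaves mig_cookie at 0 for lock resources that fit in a single network
 * message, so the counter above wraps from ~0ULL back to 1 and never
 * hands out 0. */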

static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}

/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
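
/* Design note: the pending work items are spliced onto a private list
 * under dlm->work_lock and then processed with the lock dropped, so the
 * spinlock is never held across a workfunc that may sleep or touch the
 * network, and new items can be queued concurrently while this batch
 * runs. */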

/*
 * RECOVERY THREAD
 */

void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}


/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of the secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 *
 * (see the step-to-function map just below)
 */

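/*
 * A rough map of the steps above onto the code in this file (steps 1
 * and 10 are driven by dlm_recovery_thread()/dlm_do_recovery()):
 *   2) dlm_pick_recovery_master()
 *   3,4) dlm_move_reco_locks_to_list(), resources staying on the
 *        recovering list until dlm_finish_local_lockres_recovery()
 *   5) dlm_request_all_locks()  [DLM_LOCK_REQUEST_MSG]
 *   6) dlm_request_all_locks_worker() -> dlm_send_one_lockres()
 *      [DLM_MIG_LOCKRES_MSG], completion signalled by
 *      dlm_send_all_done_msg() [DLM_RECO_DATA_DONE_MSG]
 *   7,8) dlm_send_finalize_reco_message()
 *   9) dlm_reset_recovery()
 */
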
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}
	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}

#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}

/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
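
/* A sketch of the caller-side pattern described in the comment above;
 * the real call sites live in the dlmlock/dlmunlock paths, not in this
 * file, so treat the snippet as illustrative only:
 *
 *	dlm_wait_for_recovery(dlm);
 *	// proceed with the lock call; any resource still being
 *	// recovered carries the DLM_LOCK_RES_RECOVERING flag
 */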

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}

static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d): recovery thread found node %u in the recovery map!\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->dlm_reco_thread_task->pid,
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry (iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					kthread_should_stop(),
					msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	mlog_exit(status);
	return status;
}

static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}
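
/* Design note: the domain map is snapshotted into reco.node_map under
 * dlm->spinlock above, so the set of nodes polled for recovery data is
 * fixed at this point.  A node that dies afterwards is not dropped from
 * the list; dlm_remaster_locks() marks it DLM_RECO_NODE_DATA_DEAD when
 * a request to it fails with a host-down error, and carries on without
 * its info. */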

static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	enum dlm_status ret;

	mlog(0, "\n");


	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = DLM_NOLOCKMGR;
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	// return from here, then
	// sleep until all received or error
	return ret;

}

int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each(iter, &resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}


static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		if (!dlm_is_host_down(ret)) {
			mlog_errno(ret);
			mlog(ML_ERROR, "%s: unknown error sending data-done "
			     "to %u\n", dlm->name, send_to);
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}


int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	/* walk the granted, converting and blocked queues in turn;
	 * queue++ relies on those three list_heads being laid out
	 * consecutively in struct dlm_lock_resource */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}


static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

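/* Wire-format note (a summary of the code above, not new protocol): a
 * dlm_migratable_lockres occupies exactly one page and carries at most
 * DLM_MAX_MIGRATABLE_LOCKS dlm_migratable_lock entries.  A lockres with
 * more locks than that is split across several messages that share the
 * same nonzero mig_cookie and total_locks count; only the final message
 * has DLM_MRES_ALL_DONE set (see dlm_send_mig_lockres_msg() above). */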

/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (!dlm_lvb_is_empty(mres->lvb) &&
			    (ml->type == LKM_EXMODE ||
			     memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}


int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}



/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

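/* One concrete consequence of the note above: a -ENOMEM returned from
 * this handler comes back as the status of dlm_send_mig_lockres_msg()
 * on the sending node, whose caller dlm_send_one_lockres() BUG()s on
 * any error that is not a host-down error. */
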
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);

	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res;	/* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	mlog_exit(ret);
	return ret;
}


static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	/* note: data points at the mres buffer, not the dlm; the dlm
	 * comes from the work item itself */
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare.  only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
	mlog_exit(ret);
}



int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}


int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}


/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	/* 0 = granted, 1 = converting, 2 = blocked; like
	 * dlm_num_locks_in_lockres() this depends on the three
	 * list_heads being adjacent in struct dlm_lock_resource */
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that it doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/

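/* In short: for a lock that already exists locally (the ml->node ==
 * dlm->node_num case in dlm_process_recovery_data() below), the incoming
 * entry is only used to reposition the local lock on the right queue;
 * its contents are never copied over the local structure. */
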
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, bad;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i = 0; i < mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry(iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				u64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %u:%llu!\n",
				     dlm_get_lock_cookie_node(c),
				     dlm_get_lock_cookie_seq(c));
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i = 0; i < DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i = 0; i < DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 * 1. order of locks on granted queue is
		 *    meaningless.
		 * 2. order of locks on converting queue is
		 *    LOST with the node death.  sorry charlie.
		 * 3. order of locks on the blocked queue is
		 *    also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				u64 c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(c),
				     dlm_get_lock_cookie_seq(c));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(ml->cookie),
				     dlm_get_lock_cookie_seq(ml->cookie),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	mlog_exit(ret);
	return ret;
}

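/* Mark a lockres whose master died RECOVERING and park it on
 * dlm->reco.resources, taking a reference for the time it spends there.
 * Any lock, convert, unlock or cancel that was still in flight to the
 * dead master is unwound or committed here, so the state later shipped
 * to the new master is self-consistent. */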
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i = DLM_BLOCKED_LIST; i >= DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}



/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;

	struct dlm_lock_resource *res;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (!__dlm_lockres_unused(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
					dlm_lockres_put(res);
				}
				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				if (!__dlm_lockres_unused(res))
					__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}

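/* Decide whether a lock's view of the lvb is still trustworthy after a
 * node death.  With local set we are inspecting our own locks on a
 * non-master copy: holding less than PR means our cached lvb may be
 * stale.  Without local we are the master inspecting the dead node's
 * locks: an EX holder may have written a value that never reached us. */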
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i = DLM_GRANTED_LIST; i <= DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting. */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;


	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}

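/* Core node-death handling, called with dlm->spinlock held, either from
 * the heartbeat down callback or synthesized by dlm_begin_reco_handler.
 * The ordering below is deliberate: local cleanup runs before the
 * heartbeat eviction callbacks fire, and only then is the node moved
 * from the live/domain maps into the recovery map. */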
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb.status=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}

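/* Broadcast DLM_BEGIN_RECO_MSG to every node remaining in the domain
 * (self and the dead node are skipped).  A node that has gone down is
 * ignored; a node answering with EAGAIN is still finalizing the
 * previous recovery and is retried after a short sleep.  Transport
 * failures are retried forever, which is why the caller may BUG on a
 * nonzero return. */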
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		} else if (ret == EAGAIN) {
			/* positive EAGAIN here is the handler's return
			 * value, passed back as the message status */
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			/* TODO Look into replacing msleep with cond_resched() */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

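/* Handler for DLM_BEGIN_RECO_MSG.  Records the sender as recovery
 * master and the advertised dead node, forcing our own node-down path
 * if heartbeat has not delivered the death yet.  Returns positive
 * EAGAIN (not -EAGAIN) while this node still sits between finalize1
 * and finalize2 of the previous recovery, telling the sender to back
 * off and retry. */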
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		return EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}

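/* Recovery is finalized in two passes: stage 1 makes every node apply
 * the recovered state and enter DLM_RECO_STATE_FINALIZE; only once all
 * nodes have acknowledged that does stage 2 tell them to drop the flag
 * and reset their recovery fields.  The intent, as far as the code
 * shows, is a barrier: no node resets its recovery state while others
 * are still finalizing. */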
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog_errno(ret);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}

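/* Handler for DLM_FINALIZE_RECO_MSG.  Stage 1 hands the recovered
 * lockres state over to the new master and sets
 * DLM_RECO_STATE_FINALIZE; stage 2 clears that flag, resets the
 * recovery fields and kicks the recovery thread.  Stages arriving out
 * of order are a fatal protocol error. */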
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage %d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
	case 1:
		dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			mlog(ML_ERROR, "%s: received finalize1 from "
			     "new master %u for dead node %u, but "
			     "this node has already received it!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		break;
	case 2:
		if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
			mlog(ML_ERROR, "%s: received finalize2 from "
			     "new master %u for dead node %u, but "
			     "this node did not have finalize1!\n",
			     dlm->name, fr->node_idx, fr->dead_node);
			dlm_print_reco_node_status(dlm);
			BUG();
		}
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
		dlm_reset_recovery(dlm);
		dlm_kick_recovery_thread(dlm);
		break;
	default:
		BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}