/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmod.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

enum dlm_mle_type {
	DLM_MLE_BLOCK,
	DLM_MLE_MASTER,
	DLM_MLE_MIGRATION
};

struct dlm_lock_name
{
	u8 len;
	u8 name[DLM_LOCKID_NAME_MAX];
};

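/* a master list entry (mle) tracks one in-flight mastery operation:
 * either this node is trying to master the resource (MASTER), is
 * blocked waiting on another node's mastery (BLOCK), or is following
 * a migration (MIGRATION).  the maybe/vote/response/node bitmaps
 * record which nodes might master the lock, are being polled, have
 * responded, and are currently up. */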
struct dlm_master_list_entry
{
	struct list_head list;
	struct list_head hb_events;
	struct dlm_ctxt *dlm;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	atomic_t woken;
	struct kref mle_refs;
	int inuse;
	unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	u8 master;
	u8 new_master;
	enum dlm_mle_type type;
	struct o2hb_callback_func mle_hb_up;
	struct o2hb_callback_func mle_hb_down;
	union {
		struct dlm_lock_resource *res;
		struct dlm_lock_name name;
	} u;
};


static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node,
			      int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node,
			    int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res,
				void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

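/* an mle is keyed by (dlm, lockname): BLOCK and MIGRATION entries
 * carry the name inline, while MASTER entries point at the lockres */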
static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle,
				const char *name,
				unsigned int namelen)
{
	struct dlm_lock_resource *res;

	if (dlm != mle->dlm)
		return 0;

	if (mle->type == DLM_MLE_BLOCK ||
	    mle->type == DLM_MLE_MIGRATION) {
		if (namelen != mle->u.name.len ||
		    memcmp(name, mle->u.name.name, namelen) != 0)
			return 0;
	} else {
		res = mle->u.res;
		if (namelen != res->lockname.len ||
		    memcmp(res->lockname.name, name, namelen) != 0)
			return 0;
	}
	return 1;
}

#define dlm_print_nodemap(m) _dlm_print_nodemap(m,#m)
static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
{
	int i;
	printk("%s=[ ", mapname);
	for (i=0; i<O2NM_MAX_NODES; i++)
		if (test_bit(i, map))
			printk("%d ", i);
	printk("]");
}

static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
{
	int refs;
	char *type;
	char attached;
	u8 master;
	unsigned int namelen;
	const char *name;
	struct kref *k;
	unsigned long *maybe = mle->maybe_map,
		      *vote = mle->vote_map,
		      *resp = mle->response_map,
		      *node = mle->node_map;

	k = &mle->mle_refs;
	if (mle->type == DLM_MLE_BLOCK)
		type = "BLK";
	else if (mle->type == DLM_MLE_MASTER)
		type = "MAS";
	else
		type = "MIG";
	refs = atomic_read(&k->refcount);
	master = mle->master;
	attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');

	if (mle->type != DLM_MLE_MASTER) {
		namelen = mle->u.name.len;
		name = mle->u.name.name;
	} else {
		namelen = mle->u.res->lockname.len;
		name = mle->u.res->lockname.name;
	}

	mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
	     namelen, name, type, refs, master, mle->new_master, attached,
	     mle->inuse);
	dlm_print_nodemap(maybe);
	printk(", ");
	dlm_print_nodemap(vote);
	printk(", ");
	dlm_print_nodemap(resp);
	printk(", ");
	dlm_print_nodemap(node);
	printk(", ");
	printk("\n");
}

#if 0
/* Code here is included but defined out as it aids debugging */

static void dlm_dump_mles(struct dlm_ctxt *dlm)
{
	struct dlm_master_list_entry *mle;

	mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
	spin_lock(&dlm->master_lock);
	list_for_each_entry(mle, &dlm->master_list, list)
		dlm_print_one_mle(mle);
	spin_unlock(&dlm->master_lock);
}

int dlm_dump_all_mles(const char __user *data, unsigned int len)
{
	struct dlm_ctxt *dlm;

	spin_lock(&dlm_domain_lock);
	list_for_each_entry(dlm, &dlm_domains, list) {
		mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
		dlm_dump_mles(dlm);
	}
	spin_unlock(&dlm_domain_lock);
	return len;
}
EXPORT_SYMBOL_GPL(dlm_dump_all_mles);

#endif /*  0  */

static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

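/* classify a messaging errno: any error that means the socket to the
 * target node is gone gets treated as node death by the callers */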
int dlm_is_host_down(int errno)
{
	switch (errno) {
		case -EBADF:
		case -ECONNREFUSED:
		case -ENOTCONN:
		case -ECONNRESET:
		case -EPIPE:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ETIMEDOUT:
		case -ECONNABORTED:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -ENETRESET:
		case -ESHUTDOWN:
		case -ENOPROTOOPT:
		case -EINVAL:	/* if returned from our tcp code,
				   this means there is no socket */
			return 1;
	}
	return 0;
}


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					      struct dlm_master_list_entry *mle)
{
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					    struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}

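/* the inuse count (taken along with an extra kref) pins an mle for a
 * caller that must drop dlm->spinlock and dlm->master_lock while still
 * using it, so the entry cannot be freed underneath that caller */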
static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

}

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);
	if (!atomic_read(&mle->mle_refs.refcount)) {
		/* this may or may not crash, but who cares.
		 * it's a BUG. */
		mlog(ML_ERROR, "bad mle: %p\n", mle);
		dlm_print_one_mle(mle);
		BUG();
	} else
		kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm;
	dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	__dlm_put_mle(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
	kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
			enum dlm_mle_type type,
			struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			const char *name,
			unsigned int namelen)
{
	assert_spin_locked(&dlm->spinlock);

	mle->dlm = dlm;
	mle->type = type;
	INIT_LIST_HEAD(&mle->list);
	INIT_LIST_HEAD(&mle->hb_events);
	memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
	spin_lock_init(&mle->spinlock);
	init_waitqueue_head(&mle->wq);
	atomic_set(&mle->woken, 0);
	kref_init(&mle->mle_refs);
	memset(mle->response_map, 0, sizeof(mle->response_map));
	mle->master = O2NM_MAX_NODES;
	mle->new_master = O2NM_MAX_NODES;
	mle->inuse = 0;

	if (mle->type == DLM_MLE_MASTER) {
		BUG_ON(!res);
		mle->u.res = res;
	} else if (mle->type == DLM_MLE_BLOCK) {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	} else /* DLM_MLE_MIGRATION */ {
		BUG_ON(!name);
		memcpy(mle->u.name.name, name, namelen);
		mle->u.name.len = namelen;
	}

	/* copy off the node_map and register hb callbacks on our copy */
	memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
	memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
	clear_bit(dlm->node_num, mle->vote_map);
	clear_bit(dlm->node_num, mle->node_map);

	/* attach the mle to the domain node up/down events */
	__dlm_mle_attach_hb_events(dlm, mle);
}

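/* a successful dlm_find_mle() takes a reference on the returned mle
 * via dlm_get_mle(); callers must balance it with dlm_put_mle() */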
/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
			struct dlm_master_list_entry **mle,
			char *name, unsigned int namelen)
{
	struct dlm_master_list_entry *tmpmle;

	assert_spin_locked(&dlm->master_lock);

	list_for_each_entry(tmpmle, &dlm->master_list, list) {
		if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
			continue;
		dlm_get_mle(tmpmle);
		*mle = tmpmle;
		return 1;
	}
	return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
	struct dlm_master_list_entry *mle;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
		if (node_up)
			dlm_mle_node_up(dlm, mle, NULL, idx);
		else
			dlm_mle_node_down(dlm, mle, NULL, idx);
	}
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
			      struct dlm_master_list_entry *mle,
			      struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (!test_bit(idx, mle->node_map))
		mlog(0, "node %u already removed from nodemap!\n", idx);
	else
		clear_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
			    struct dlm_master_list_entry *mle,
			    struct o2nm_node *node, int idx)
{
	spin_lock(&mle->spinlock);

	if (test_bit(idx, mle->node_map))
		mlog(0, "node %u already in node map!\n", idx);
	else
		set_bit(idx, mle->node_map);

	spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
	dlm_mle_cache = kmem_cache_create("o2dlm_mle",
					  sizeof(struct dlm_master_list_entry),
					  0, SLAB_HWCACHE_ALIGN,
					  NULL);
	if (dlm_mle_cache == NULL)
		return -ENOMEM;
	return 0;
}

void dlm_destroy_mle_cache(void)
{
	if (dlm_mle_cache)
		kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mlog_entry_void();

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	if (mle->type != DLM_MLE_MASTER) {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.name.len, mle->u.name.name, mle->type);
	} else {
		mlog(0, "calling mle_release for %.*s, type %d\n",
		     mle->u.res->lockname.len,
		     mle->u.res->lockname.name, mle->type);
	}
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* remove from list if not already */
	if (!list_empty(&mle->list))
		list_del_init(&mle->list);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	/* NOTE: kfree under spinlock here.
	 * if this is bad, we can move this to a freelist. */
	kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
	dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
					      sizeof(struct dlm_lock_resource),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockres_cache)
		goto bail;

	dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
					       DLM_LOCKID_NAME_MAX, 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!dlm_lockname_cache)
		goto bail;

	return 0;
bail:
	dlm_destroy_master_caches();
	return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
	if (dlm_lockname_cache)
		kmem_cache_destroy(dlm_lockname_cache);

	if (dlm_lockres_cache)
		kmem_cache_destroy(dlm_lockres_cache);
}

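/* owner bookkeeping: the domain keeps counters of how many lockres'
 * are mastered locally, mastered remotely, or have no known owner */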
static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
				  struct dlm_lock_resource *res,
				  u8 owner)
{
	assert_spin_locked(&res->spinlock);

	mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);

	if (owner == dlm->node_num)
		atomic_inc(&dlm->local_resources);
	else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_inc(&dlm->unknown_resources);
	else
		atomic_inc(&dlm->remote_resources);

	res->owner = owner;
}

void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res, u8 owner)
{
	assert_spin_locked(&res->spinlock);

	if (owner == res->owner)
		return;

	if (res->owner == dlm->node_num)
		atomic_dec(&dlm->local_resources);
	else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
		atomic_dec(&dlm->unknown_resources);
	else
		atomic_dec(&dlm->remote_resources);

	dlm_set_lockres_owner(dlm, res, owner);
}


static void dlm_lockres_release(struct kref *kref)
{
	struct dlm_lock_resource *res;

	res = container_of(kref, struct dlm_lock_resource, refs);

	/* This should not happen -- all lockres' have a name
	 * associated with them at init time. */
	BUG_ON(!res->lockname.name);

	mlog(0, "destroying lockres %.*s\n", res->lockname.len,
	     res->lockname.name);

	if (!list_empty(&res->tracking))
		list_del_init(&res->tracking);
	else {
		mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
		     res->lockname.len, res->lockname.name);
		dlm_print_one_lock_resource(res);
	}

	if (!hlist_unhashed(&res->hash_node) ||
	    !list_empty(&res->granted) ||
	    !list_empty(&res->converting) ||
	    !list_empty(&res->blocked) ||
	    !list_empty(&res->dirty) ||
	    !list_empty(&res->recovering) ||
	    !list_empty(&res->purge)) {
		mlog(ML_ERROR,
		     "Going to BUG for resource %.*s."
		     " We're on a list! [%c%c%c%c%c%c%c]\n",
		     res->lockname.len, res->lockname.name,
		     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
		     !list_empty(&res->granted) ? 'G' : ' ',
		     !list_empty(&res->converting) ? 'C' : ' ',
		     !list_empty(&res->blocked) ? 'B' : ' ',
		     !list_empty(&res->dirty) ? 'D' : ' ',
		     !list_empty(&res->recovering) ? 'R' : ' ',
		     !list_empty(&res->purge) ? 'P' : ' ');

		dlm_print_one_lock_resource(res);
	}

	/* By the time we're ready to blow this guy away, we shouldn't
	 * be on any lists. */
	BUG_ON(!hlist_unhashed(&res->hash_node));
	BUG_ON(!list_empty(&res->granted));
	BUG_ON(!list_empty(&res->converting));
	BUG_ON(!list_empty(&res->blocked));
	BUG_ON(!list_empty(&res->dirty));
	BUG_ON(!list_empty(&res->recovering));
	BUG_ON(!list_empty(&res->purge));

	kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
	kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res,
			     const char *name, unsigned int namelen)
{
	char *qname;

	/* If we memset here, we lose our reference to the kmalloc'd
	 * res->lockname.name, so be sure to init every field
	 * correctly! */

	qname = (char *) res->lockname.name;
	memcpy(qname, name, namelen);

	res->lockname.len = namelen;
	res->lockname.hash = dlm_lockid_hash(name, namelen);

	init_waitqueue_head(&res->wq);
	spin_lock_init(&res->spinlock);
	INIT_HLIST_NODE(&res->hash_node);
	INIT_LIST_HEAD(&res->granted);
	INIT_LIST_HEAD(&res->converting);
	INIT_LIST_HEAD(&res->blocked);
	INIT_LIST_HEAD(&res->dirty);
	INIT_LIST_HEAD(&res->recovering);
	INIT_LIST_HEAD(&res->purge);
	INIT_LIST_HEAD(&res->tracking);
	atomic_set(&res->asts_reserved, 0);
	res->migration_pending = 0;
	res->inflight_locks = 0;

	kref_init(&res->refs);

	/* just for consistency */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
	spin_unlock(&res->spinlock);

	res->state = DLM_LOCK_RES_IN_PROGRESS;

	res->last_used = 0;

	list_add_tail(&res->tracking, &dlm->tracking_list);

	memset(res->lvb, 0, DLM_LVB_LEN);
	memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen)
{
	struct dlm_lock_resource *res = NULL;

	res = (struct dlm_lock_resource *)
		kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
	if (!res)
		goto error;

	res->lockname.name = (char *)
		kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
	if (!res->lockname.name)
		goto error;

	dlm_init_lockres(dlm, res, name, namelen);
	return res;

error:
	if (res && res->lockname.name)
		kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

	if (res)
		kmem_cache_free(dlm_lockres_cache, res);
	return NULL;
}

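/* inflight reference counting: while this node has lock traffic
 * outstanding against a lockres, it keeps its own bit set in
 * res->refmap so the master cannot purge the resource under it */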
void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     int new_lockres,
				     const char *file,
				     int line)
{
	if (!new_lockres)
		assert_spin_locked(&res->spinlock);

	if (!test_bit(dlm->node_num, res->refmap)) {
		BUG_ON(res->inflight_locks != 0);
		dlm_lockres_set_refmap_bit(dlm->node_num, res);
	}
	res->inflight_locks++;
	mlog(0, "%s:%.*s: inflight++: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
}

void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     const char *file,
				     int line)
{
	assert_spin_locked(&res->spinlock);

	BUG_ON(res->inflight_locks == 0);
	res->inflight_locks--;
	mlog(0, "%s:%.*s: inflight--: now %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     res->inflight_locks);
	if (res->inflight_locks == 0)
		dlm_lockres_clear_refmap_bit(dlm->node_num, res);
	wake_up(&res->wq);
}

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.   need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int namelen,
						 int flags)
{
	struct dlm_lock_resource *tmpres=NULL, *res=NULL;
	struct dlm_master_list_entry *mle = NULL;
	struct dlm_master_list_entry *alloc_mle = NULL;
	int blocked = 0;
	int ret, nodenum;
	struct dlm_node_iter iter;
	unsigned int hash;
	int tries = 0;
	int bit, wait_on_recovery = 0;
	int drop_inflight_if_nonlocal = 0;

	BUG_ON(!lockid);

	hash = dlm_lockid_hash(lockid, namelen);

	mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
	spin_lock(&dlm->spinlock);
	tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
	if (tmpres) {
		int dropping_ref = 0;

		spin_lock(&tmpres->spinlock);
		if (tmpres->owner == dlm->node_num) {
			BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
			dlm_lockres_grab_inflight_ref(dlm, tmpres);
		} else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
			dropping_ref = 1;
		spin_unlock(&tmpres->spinlock);
		spin_unlock(&dlm->spinlock);

		/* wait until done messaging the master, drop our ref to allow
		 * the lockres to be purged, start over. */
		if (dropping_ref) {
			spin_lock(&tmpres->spinlock);
			__dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
			spin_unlock(&tmpres->spinlock);
			dlm_lockres_put(tmpres);
			tmpres = NULL;
			goto lookup;
		}

		mlog(0, "found in hash!\n");
		if (res)
			dlm_lockres_put(res);
		res = tmpres;
		goto leave;
	}

	if (!res) {
		spin_unlock(&dlm->spinlock);
		mlog(0, "allocating a new resource\n");
		/* nothing found and we need to allocate one. */
		alloc_mle = (struct dlm_master_list_entry *)
			kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!alloc_mle)
			goto leave;
		res = dlm_new_lockres(dlm, lockid, namelen);
		if (!res)
			goto leave;
		goto lookup;
	}

	mlog(0, "no lockres found, allocated our own: %p\n", res);

	if (flags & LKM_LOCAL) {
		/* caller knows it's safe to assume it's not mastered elsewhere
		 * DONE! return right away */
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		__dlm_insert_lockres(dlm, res);
		dlm_lockres_grab_inflight_ref(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
		/* lockres still marked IN_PROGRESS */
		goto wake_waiters;
	}

	/* check master list to see if another node has started mastering it */
	spin_lock(&dlm->master_lock);

	/* if we found a block, wait for lock to be mastered by another node */
	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (blocked) {
		int mig;
		if (mle->type == DLM_MLE_MASTER) {
			mlog(ML_ERROR, "master entry for nonexistent lock!\n");
			BUG();
		}
		mig = (mle->type == DLM_MLE_MIGRATION);
		/* if there is a migration in progress, let the migration
		 * finish before continuing.  we can wait for the absence
		 * of the MIGRATION mle: either the migrate finished or
		 * one of the nodes died and the mle was cleaned up.
		 * if there is a BLOCK here, but it already has a master
		 * set, we are too late.  the master does not have a ref
		 * for us in the refmap.  detach the mle and drop it.
		 * either way, go back to the top and start over. */
		if (mig || mle->master != O2NM_MAX_NODES) {
			BUG_ON(mig && mle->master == dlm->node_num);
			/* we arrived too late.  the master does not
			 * have a ref for us. retry. */
			mlog(0, "%s:%.*s: late on %s\n",
			     dlm->name, namelen, lockid,
			     mig ?  "MIGRATION" : "BLOCK");
			spin_unlock(&dlm->master_lock);
			spin_unlock(&dlm->spinlock);

			/* master is known, detach */
			if (!mig)
				dlm_mle_detach_hb_events(dlm, mle);
			dlm_put_mle(mle);
			mle = NULL;
			/* this is lame, but we can't wait on either
			 * the mle or lockres waitqueue here */
			if (mig)
				msleep(100);
			goto lookup;
		}
	} else {
		/* go ahead and try to master lock on this node */
		mle = alloc_mle;
		/* make sure this does not get freed below */
		alloc_mle = NULL;
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		list_add(&mle->list, &dlm->master_list);

		/* still holding the dlm spinlock, check the recovery map
		 * to see if there are any nodes that still need to be
		 * considered.  these will not appear in the mle nodemap
		 * but they might own this lockres.  wait on them. */
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		}
	}

	/* at this point there is either a DLM_MLE_BLOCK or a
	 * DLM_MLE_MASTER on the master list, so it's safe to add the
	 * lockres to the hashtable.  anyone who finds the lock will
	 * still have to wait on the IN_PROGRESS. */

	/* finally add the lockres to its hash bucket */
	__dlm_insert_lockres(dlm, res);
	/* since this lockres is new it does not require the spinlock */
	dlm_lockres_grab_inflight_ref_new(dlm, res);

	/* if this node does not become the master make sure to drop
	 * this inflight reference below */
	drop_inflight_if_nonlocal = 1;

	/* get an extra ref on the mle in case this is a BLOCK
	 * if so, the creator of the BLOCK may try to put the last
	 * ref at this time in the assert master handler, so we
	 * need an extra one to keep from a bad ptr deref. */
	dlm_get_mle_inuse(mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

redo_request:
	while (wait_on_recovery) {
		/* any cluster changes that occurred after dropping the
		 * dlm spinlock would be detectable by a change on the mle,
		 * so we only need to clear out the recovery map once. */
		if (dlm_is_recovery_lock(lockid, namelen)) {
			mlog(ML_NOTICE, "%s: recovery map is not empty, but "
			     "must master $RECOVERY lock now\n", dlm->name);
			if (!dlm_pre_master_reco_lockres(dlm, res))
				wait_on_recovery = 0;
			else {
				mlog(0, "%s: waiting 500ms for heartbeat state "
				    "change\n", dlm->name);
				msleep(500);
			}
			continue;
		}

		dlm_kick_recovery_thread(dlm);
		msleep(1000);
		dlm_wait_for_recovery(dlm);

		spin_lock(&dlm->spinlock);
		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit < O2NM_MAX_NODES) {
			mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
			     "recover before lock mastery can begin\n",
			     dlm->name, namelen, (char *)lockid, bit);
			wait_on_recovery = 1;
		} else
			wait_on_recovery = 0;
		spin_unlock(&dlm->spinlock);

		if (wait_on_recovery)
			dlm_wait_for_node_recovery(dlm, bit, 10000);
	}

	/* must wait for lock to be mastered elsewhere */
	if (blocked)
		goto wait;

	ret = -EINVAL;
	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (ret < 0)
			mlog_errno(ret);
		if (mle->master != O2NM_MAX_NODES) {
			/* found a master ! */
			if (mle->master <= nodenum)
				break;
			/* if our master request has not reached the master
			 * yet, keep going until it does.  this is how the
			 * master will know that asserts are needed back to
			 * the lower nodes. */
			mlog(0, "%s:%.*s: requests only up to %u but master "
			     "is %u, keep going\n", dlm->name, namelen,
			     lockid, nodenum, mle->master);
		}
	}

wait:
	/* keep going until the response map includes all nodes */
	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
	if (ret < 0) {
		wait_on_recovery = 1;
		mlog(0, "%s:%.*s: node map changed, redo the "
		     "master request now, blocked=%d\n",
		     dlm->name, res->lockname.len,
		     res->lockname.name, blocked);
		if (++tries > 20) {
			mlog(ML_ERROR, "%s:%.*s: spinning on "
			     "dlm_wait_for_lock_mastery, blocked=%d\n",
			     dlm->name, res->lockname.len,
			     res->lockname.name, blocked);
			dlm_print_one_lock_resource(res);
			dlm_print_one_mle(mle);
			tries = 0;
		}
		goto redo_request;
	}

	mlog(0, "lockres mastered by %u\n", res->owner);
	/* make sure we never continue without this */
	BUG_ON(res->owner == O2NM_MAX_NODES);

	/* master is known, detach if not already detached */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
	/* put the extra ref */
	dlm_put_mle_inuse(mle);

wake_waiters:
	spin_lock(&res->spinlock);
	if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
		dlm_lockres_drop_inflight_ref(dlm, res);
	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);

leave:
	/* need to free the unused mle */
	if (alloc_mle)
		kmem_cache_free(dlm_mle_cache, alloc_mle);

	return res;
}

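/*
 * typical usage, as an illustrative sketch only (the real call sites
 * live in the dlmlock paths):
 *
 *	res = dlm_get_lock_resource(dlm, name, namelen, flags);
 *	if (!res)
 *		return DLM_IVLOCKID;
 *
 * on return the lockres has a known owner and a reference that the
 * caller eventually drops with dlm_lockres_put().
 */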

#define DLM_MASTERY_TIMEOUT_MS   5000

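/* poll until either some node asserts mastery of the lockres or every
 * node in the vote map has responded; if this node holds the lowest
 * node number among the candidates, it asserts mastery itself */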
static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_master_list_entry *mle,
				     int *blocked)
{
	u8 m;
	int ret, bit;
	int map_changed, voting_done;
	int assert, sleep;

recheck:
	ret = 0;
	assert = 0;

	/* check if another node has already become the owner */
	spin_lock(&res->spinlock);
	if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
		     res->lockname.len, res->lockname.name, res->owner);
		spin_unlock(&res->spinlock);
		/* this will cause the master to re-assert across
		 * the whole cluster, freeing up mles */
		if (res->owner != dlm->node_num) {
			ret = dlm_do_master_request(res, mle, res->owner);
			if (ret < 0) {
				/* give recovery a chance to run */
				mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
				msleep(500);
				goto recheck;
			}
		}
		ret = 0;
		goto leave;
	}
	spin_unlock(&res->spinlock);

	spin_lock(&mle->spinlock);
	m = mle->master;
	map_changed = (memcmp(mle->vote_map, mle->node_map,
			      sizeof(mle->vote_map)) != 0);
	voting_done = (memcmp(mle->vote_map, mle->response_map,
			     sizeof(mle->vote_map)) == 0);

	/* restart if we hit any errors */
	if (map_changed) {
		int b;
		mlog(0, "%s: %.*s: node map changed, restarting\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
		b = (mle->type == DLM_MLE_BLOCK);
		if ((*blocked && !b) || (!*blocked && b)) {
			mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     *blocked, b);
			*blocked = b;
		}
		spin_unlock(&mle->spinlock);
		if (ret < 0) {
			mlog_errno(ret);
			goto leave;
		}
		mlog(0, "%s:%.*s: restart lock mastery succeeded, "
		     "rechecking now\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		goto recheck;
	} else {
		if (!voting_done) {
			mlog(0, "map not changed and voting not done "
			     "for %s:%.*s\n", dlm->name, res->lockname.len,
			     res->lockname.name);
		}
	}

	if (m != O2NM_MAX_NODES) {
		/* another node has done an assert!
		 * all done! */
		sleep = 0;
	} else {
		sleep = 1;
		/* have all nodes responded? */
		if (voting_done && !*blocked) {
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (dlm->node_num <= bit) {
				/* my node number is lowest.
				 * now tell other nodes that I am
				 * mastering this. */
				mle->master = dlm->node_num;
				/* ref was grabbed in get_lock_resource
				 * will be dropped in dlmlock_master */
				assert = 1;
				sleep = 0;
			}
			/* if voting is done, but we have not received
			 * an assert master yet, we must sleep */
		}
	}

	spin_unlock(&mle->spinlock);

	/* sleep if we haven't finished voting yet */
	if (sleep) {
		unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

		/*
		if (atomic_read(&mle->mle_refs.refcount) < 2)
			mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
			atomic_read(&mle->mle_refs.refcount),
			res->lockname.len, res->lockname.name);
		*/
		atomic_set(&mle->woken, 0);
		(void)wait_event_timeout(mle->wq,
					 (atomic_read(&mle->woken) == 1),
					 timeo);
		if (res->owner == O2NM_MAX_NODES) {
			mlog(0, "%s:%.*s: waiting again\n", dlm->name,
			     res->lockname.len, res->lockname.name);
			goto recheck;
		}
		mlog(0, "done waiting, master is %u\n", res->owner);
		ret = 0;
		goto leave;
	}

	ret = 0;   /* done */
	if (assert) {
		m = dlm->node_num;
		mlog(0, "about to master %.*s here, this=%u\n",
		     res->lockname.len, res->lockname.name, m);
		ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
		if (ret) {
			/* This is a failure in the network path,
			 * not in the response to the assert_master
			 * (any nonzero response is a BUG on this node).
			 * Most likely a socket just got disconnected
			 * due to node death. */
			mlog_errno(ret);
		}
		/* no longer need to restart lock mastery.
		 * all living nodes have been contacted. */
		ret = 0;
	}

	/* set the lockres owner */
	spin_lock(&res->spinlock);
	/* mastery reference obtained either during
	 * assert_master_handler or in get_lock_resource */
	dlm_change_lockres_owner(dlm, res, m);
	spin_unlock(&res->spinlock);

leave:
	return ret;
}

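/* iterate over the XOR of two node bitmaps, reporting each node whose
 * state changed: NODE_DOWN if it was only in the original map, NODE_UP
 * if it only appears in the current one */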
struct dlm_bitmap_diff_iter
{
	int curnode;
	unsigned long *orig_bm;
	unsigned long *cur_bm;
	unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
	NODE_DOWN = -1,
	NODE_NO_CHANGE = 0,
	NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
				      unsigned long *orig_bm,
				      unsigned long *cur_bm)
{
	unsigned long p1, p2;
	int i;

	iter->curnode = -1;
	iter->orig_bm = orig_bm;
	iter->cur_bm = cur_bm;

	for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
		p1 = *(iter->orig_bm + i);
		p2 = *(iter->cur_bm + i);
		iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
	}
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
				     enum dlm_node_state_change *state)
{
	int bit;

	if (iter->curnode >= O2NM_MAX_NODES)
		return -ENOENT;

	bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
			    iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}

	/* if it was there in the original then this node died */
	if (test_bit(bit, iter->orig_bm))
		*state = NODE_DOWN;
	else
		*state = NODE_UP;

	iter->curnode = bit;
	return bit;
}


static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    int blocked)
{
	struct dlm_bitmap_diff_iter bdi;
	enum dlm_node_state_change sc;
	int node;
	int ret = 0;

	mlog(0, "something happened such that the "
	     "master process may need to be restarted!\n");

	assert_spin_locked(&mle->spinlock);

	dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
	node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	while (node >= 0) {
		if (sc == NODE_UP) {
			/* a node came up.  clear any old vote from
			 * the response map and set it in the vote map
			 * then restart the mastery. */
			mlog(ML_NOTICE, "node %d up while restarting\n", node);

			/* redo the master request, but only for the new node */
			mlog(0, "sending request to new node\n");
			clear_bit(node, mle->response_map);
			set_bit(node, mle->vote_map);
		} else {
			mlog(ML_ERROR, "node down! %d\n", node);
			if (blocked) {
				int lowest = find_next_bit(mle->maybe_map,
						       O2NM_MAX_NODES, 0);

				/* act like it was never there */
				clear_bit(node, mle->maybe_map);

				if (node == lowest) {
					mlog(0, "expected master %u died"
					    " while this node was blocked "
					    "waiting on it!\n", node);
					lowest = find_next_bit(mle->maybe_map,
							O2NM_MAX_NODES,
							lowest+1);
					if (lowest < O2NM_MAX_NODES) {
						mlog(0, "%s:%.*s:still "
						     "blocked. waiting on %u "
						     "now\n", dlm->name,
						     res->lockname.len,
						     res->lockname.name,
						     lowest);
					} else {
						/* mle is an MLE_BLOCK, but
						 * there is now nothing left to
						 * block on.  we need to return
						 * all the way back out and try
						 * again with an MLE_MASTER.
						 * dlm_do_local_recovery_cleanup
						 * has already run, so the mle
						 * refcount is ok */
						mlog(0, "%s:%.*s: no "
						     "longer blocking. try to "
						     "master this here\n",
						     dlm->name,
						     res->lockname.len,
						     res->lockname.name);
						mle->type = DLM_MLE_MASTER;
						mle->u.res = res;
					}
				}
			}

			/* now blank out everything, as if we had never
			 * contacted anyone */
			memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
			memset(mle->response_map, 0, sizeof(mle->response_map));
			/* reset the vote_map to the current node_map */
			memcpy(mle->vote_map, mle->node_map,
			       sizeof(mle->node_map));
			/* put myself into the maybe map */
			if (mle->type != DLM_MLE_BLOCK)
				set_bit(dlm->node_num, mle->maybe_map);
		}
		ret = -EAGAIN;
		node = dlm_bitmap_diff_iter_next(&bdi, &sc);
	}
	return ret;
}


/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */

static int dlm_do_master_request(struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle, int to)
{
	struct dlm_ctxt *dlm = mle->dlm;
	struct dlm_master_request request;
	int ret, response=0, resend;

	memset(&request, 0, sizeof(request));
	request.node_idx = dlm->node_num;

	BUG_ON(mle->type == DLM_MLE_MIGRATION);

	if (mle->type != DLM_MLE_MASTER) {
		request.namelen = mle->u.name.len;
		memcpy(request.name, mle->u.name.name, request.namelen);
	} else {
		request.namelen = mle->u.res->lockname.len;
		memcpy(request.name, mle->u.res->lockname.name,
			request.namelen);
	}

again:
	ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
				 sizeof(request), to, &response);
	if (ret < 0)  {
		if (ret == -ESRCH) {
			/* should never happen */
			mlog(ML_ERROR, "TCP stack not ready!\n");
			BUG();
		} else if (ret == -EINVAL) {
			mlog(ML_ERROR, "bad args passed to o2net!\n");
			BUG();
		} else if (ret == -ENOMEM) {
			mlog(ML_ERROR, "out of memory while trying to send "
			     "network message!  retrying\n");
			/* this is totally crude */
			msleep(50);
			goto again;
		} else if (!dlm_is_host_down(ret)) {
			/* not a network error. bad. */
			mlog_errno(ret);
			mlog(ML_ERROR, "unhandled error!");
			BUG();
		}
		/* all other errors should be network errors,
		 * and likely indicate node death */
		mlog(ML_ERROR, "link to %d went down!\n", to);
		goto out;
	}

	ret = 0;
	resend = 0;
	spin_lock(&mle->spinlock);
	switch (response) {
		case DLM_MASTER_RESP_YES:
			set_bit(to, mle->response_map);
			mlog(0, "node %u is the master, response=YES\n", to);
			mlog(0, "%s:%.*s: master node %u now knows I have a "
			     "reference\n", dlm->name, res->lockname.len,
			     res->lockname.name, to);
			mle->master = to;
			break;
		case DLM_MASTER_RESP_NO:
			mlog(0, "node %u not master, response=NO\n", to);
			set_bit(to, mle->response_map);
			break;
		case DLM_MASTER_RESP_MAYBE:
			mlog(0, "node %u not master, response=MAYBE\n", to);
			set_bit(to, mle->response_map);
			set_bit(to, mle->maybe_map);
			break;
		case DLM_MASTER_RESP_ERROR:
			mlog(0, "node %u hit an error, resending\n", to);
			resend = 1;
			response = 0;
			break;
		default:
			mlog(ML_ERROR, "bad response! %u\n", response);
			BUG();
	}
	spin_unlock(&mle->spinlock);
	if (resend) {
		/* this is also totally crude */
		msleep(50);
		goto again;
	}

out:
	return ret;
}

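/* response protocol, as handled above: YES means the target is the
 * master (and now holds a refmap reference for us), NO and MAYBE just
 * record the vote, and ERROR makes the sender retry after a short
 * sleep */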
/*
 * locks that can be taken here:
 *   dlm->spinlock
 *   res->spinlock
 *   mle->spinlock
 *   dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	u8 response = DLM_MASTER_RESP_MAYBE;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
	struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
	char *name;
	unsigned int namelen, hash;
	int found, ret;
	int set_maybe;
	int dispatch_assert = 0;

	if (!dlm_grab(dlm))
		return DLM_MASTER_RESP_NO;

	if (!dlm_domain_fully_joined(dlm)) {
		response = DLM_MASTER_RESP_NO;
		goto send_response;
	}

	name = request->name;
	namelen = request->namelen;
	hash = dlm_lockid_hash(name, namelen);

	if (namelen > DLM_LOCKID_NAME_MAX) {
		response = DLM_IVBUFLEN;
		goto send_response;
	}

way_up_top:
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, namelen, hash);
	if (res) {
		spin_unlock(&dlm->spinlock);

		/* take care of the easy cases up front */
		spin_lock(&res->spinlock);
		if (res->state & (DLM_LOCK_RES_RECOVERING|
				  DLM_LOCK_RES_MIGRATING)) {
			spin_unlock(&res->spinlock);
			mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
			     "being recovered/migrated\n");
			response = DLM_MASTER_RESP_ERROR;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		if (res->owner == dlm->node_num) {
			mlog(0, "%s:%.*s: setting bit %u in refmap\n",
			     dlm->name, namelen, name, request->node_idx);
			dlm_lockres_set_refmap_bit(request->node_idx, res);
			spin_unlock(&res->spinlock);
			response = DLM_MASTER_RESP_YES;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);

			/* this node is the owner.
			 * there is some extra work that needs to
			 * happen now.  the requesting node has
			 * caused all nodes up to this one to
			 * create mles.  this node now needs to
			 * go back and clean those up. */
			dispatch_assert = 1;
			goto send_response;
		} else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
			spin_unlock(&res->spinlock);
			// mlog(0, "node %u is the master\n", res->owner);
			response = DLM_MASTER_RESP_NO;
			if (mle)
				kmem_cache_free(dlm_mle_cache, mle);
			goto send_response;
		}

		/* ok, there is no owner.  either this node is
		 * being blocked, or it is actively trying to
		 * master this lock. */
		if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
			mlog(ML_ERROR, "lock with no owner should be "
			     "in-progress!\n");
			BUG();
		}

		// mlog(0, "lockres is in progress...\n");
		spin_lock(&dlm->master_lock);
		found = dlm_find_mle(dlm, &tmpmle, name, namelen);
		if (!found) {
			mlog(ML_ERROR, "no mle found for this lock!\n");
			BUG();
		}
		set_maybe = 1;
		spin_lock(&tmpmle->spinlock);
		if (tmpmle->type == DLM_MLE_BLOCK) {
			// mlog(0, "this node is waiting for "
			// "lockres to be mastered\n");
			response = DLM_MASTER_RESP_NO;
		} else if (tmpmle->type == DLM_MLE_MIGRATION) {
			mlog(0, "node %u is master, but trying to migrate to "
			     "node %u.\n", tmpmle->master, tmpmle->new_master);
			if (tmpmle->master == dlm->node_num) {
				mlog(ML_ERROR, "no owner on lockres, but this "
				     "node is trying to migrate it to %u?!\n",
				     tmpmle->new_master);
				BUG();
			} else {
				/* the real master can respond on its own */
				response = DLM_MASTER_RESP_NO;
			}
		} else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			set_maybe = 0;
			if (tmpmle->master == dlm->node_num) {
				response = DLM_MASTER_RESP_YES;
				/* this node will be the owner.
				 * go back and clean the mles on any
				 * other nodes */
				dispatch_assert = 1;
				dlm_lockres_set_refmap_bit(request->node_idx, res);
				mlog(0, "%s:%.*s: setting bit %u in refmap\n",
				     dlm->name, namelen, name,
				     request->node_idx);
			} else
				response = DLM_MASTER_RESP_NO;
		} else {
			// mlog(0, "this node is attempting to "
			// "master lockres\n");
			response = DLM_MASTER_RESP_MAYBE;
		}
		if (set_maybe)
			set_bit(request->node_idx, tmpmle->maybe_map);
		spin_unlock(&tmpmle->spinlock);

		spin_unlock(&dlm->master_lock);
		spin_unlock(&res->spinlock);

		/* keep the mle attached to heartbeat events */
		dlm_put_mle(tmpmle);
		if (mle)
			kmem_cache_free(dlm_mle_cache, mle);
		goto send_response;
	}
1654 /*
1655 * lockres doesn't exist on this node
1656 * if there is an MLE_BLOCK, return NO
1657 * if there is an MLE_MASTER, return MAYBE
1658 * otherwise, add an MLE_BLOCK, return NO
1659 */
1660 spin_lock(&dlm->master_lock);
1661 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1662 if (!found) {
1663 /* this lockid has never been seen on this node yet */
1664 // mlog(0, "no mle found\n");
1665 if (!mle) {
1666 spin_unlock(&dlm->master_lock);
1667 spin_unlock(&dlm->spinlock);
1668
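			/* the mle cache allocation can sleep, so it must happen with
			 * both locks dropped; afterwards retry the whole lookup from
			 * way_up_top, since the lockres or an mle may have appeared
			 * in the meantime */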
1669 mle = (struct dlm_master_list_entry *)
Kurt Hackelad8100e2006-05-01 14:25:21 -07001670 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001671 if (!mle) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001672 response = DLM_MASTER_RESP_ERROR;
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001673 mlog_errno(-ENOMEM);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001674 goto send_response;
1675 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001676 goto way_up_top;
1677 }
1678
1679 // mlog(0, "this is second time thru, already allocated, "
1680 // "add the block.\n");
Kurt Hackel41b8c8a2006-04-27 19:00:26 -07001681 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001682 set_bit(request->node_idx, mle->maybe_map);
1683 list_add(&mle->list, &dlm->master_list);
1684 response = DLM_MASTER_RESP_NO;
1685 } else {
1686 // mlog(0, "mle was found\n");
1687 set_maybe = 1;
1688 spin_lock(&tmpmle->spinlock);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001689 if (tmpmle->master == dlm->node_num) {
1690 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1691 BUG();
1692 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001693 if (tmpmle->type == DLM_MLE_BLOCK)
1694 response = DLM_MASTER_RESP_NO;
1695 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1696 mlog(0, "migration mle was found (%u->%u)\n",
1697 tmpmle->master, tmpmle->new_master);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001698 /* real master can respond on its own */
1699 response = DLM_MASTER_RESP_NO;
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001700 } else
1701 response = DLM_MASTER_RESP_MAYBE;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001702 if (set_maybe)
1703 set_bit(request->node_idx, tmpmle->maybe_map);
1704 spin_unlock(&tmpmle->spinlock);
1705 }
1706 spin_unlock(&dlm->master_lock);
1707 spin_unlock(&dlm->spinlock);
1708
1709 if (found) {
1710 /* keep the mle attached to heartbeat events */
1711 dlm_put_mle(tmpmle);
1712 }
1713send_response:
Sunil Mushranb31cfc02008-03-01 14:04:22 -08001714 /*
1715 * __dlm_lookup_lockres() grabbed a reference to this lockres.
1716 * The reference is released by dlm_assert_master_worker() under
1717 * the call to dlm_dispatch_assert_master(). If
1718 * dlm_assert_master_worker() isn't called, we drop it here.
1719 */
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001720 if (dispatch_assert) {
1721 if (response != DLM_MASTER_RESP_YES)
1722 mlog(ML_ERROR, "invalid response %d\n", response);
1723 if (!res) {
1724 mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1725 BUG();
1726 }
1727 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1728 dlm->node_num, res->lockname.len, res->lockname.name);
1729 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1730 DLM_ASSERT_MASTER_MLE_CLEANUP);
1731 if (ret < 0) {
1732 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1733 response = DLM_MASTER_RESP_ERROR;
Sunil Mushranb31cfc02008-03-01 14:04:22 -08001734 dlm_lockres_put(res);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001735 }
Sunil Mushranb31cfc02008-03-01 14:04:22 -08001736 } else {
1737 if (res)
1738 dlm_lockres_put(res);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001739 }
1740
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001741 dlm_put(dlm);
1742 return response;
1743}
1744
1745/*
1746 * DLM_ASSERT_MASTER_MSG
1747 */
1748
1749
1750/*
1751 * NOTE: this can be used for debugging
1752 * can periodically run all locks owned by this node
1753 * and re-assert across the cluster...
1754 */
Adrian Bunk05488bb2008-02-17 10:20:41 +02001755static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1756 struct dlm_lock_resource *res,
1757 void *nodemap, u32 flags)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001758{
1759 struct dlm_assert_master assert;
1760 int to, tmpret;
1761 struct dlm_node_iter iter;
1762 int ret = 0;
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001763 int reassert;
Kurt Hackelba2bf212006-12-01 14:47:20 -08001764 const char *lockname = res->lockname.name;
1765 unsigned int namelen = res->lockname.len;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001766
1767 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
Sunil Mushranf3f85462007-01-29 15:19:16 -08001768
1769 spin_lock(&res->spinlock);
1770 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1771 spin_unlock(&res->spinlock);
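	/* SETREF_INPROG keeps deref handlers from clearing this lockres's
	 * refmap bits while assert messages are still in flight; it is
	 * cleared (and waiters woken) once the send loop below finishes */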
1772
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001773again:
1774 reassert = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001775
1776 /* note that if this nodemap is empty, it returns 0 */
1777 dlm_node_iter_init(nodemap, &iter);
1778 while ((to = dlm_node_iter_next(&iter)) >= 0) {
1779 int r = 0;
Kurt Hackela9ee4c82006-04-27 19:26:15 -07001780 struct dlm_master_list_entry *mle = NULL;
1781
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001782 mlog(0, "sending assert master to %d (%.*s)\n", to,
1783 namelen, lockname);
1784 memset(&assert, 0, sizeof(assert));
1785 assert.node_idx = dlm->node_num;
1786 assert.namelen = namelen;
1787 memcpy(assert.name, lockname, namelen);
1788 assert.flags = cpu_to_be32(flags);
1789
1790 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1791 &assert, sizeof(assert), to, &r);
1792 if (tmpret < 0) {
Kurt Hackel3b3b84a2006-05-01 14:31:37 -07001793 mlog(0, "assert_master returned %d!\n", tmpret);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001794 if (!dlm_is_host_down(tmpret)) {
Kurt Hackel3b3b84a2006-05-01 14:31:37 -07001795 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001796 BUG();
1797 }
1798 /* a node died. finish out the rest of the nodes. */
Kurt Hackel3b3b84a2006-05-01 14:31:37 -07001799 mlog(0, "link to %d went down!\n", to);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001800 /* any nonzero status return will do */
1801 ret = tmpret;
Kurt Hackelba2bf212006-12-01 14:47:20 -08001802 r = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001803 } else if (r < 0) {
 1804 /* ok, something went horribly wrong; dump state and kill this node. */
1805 mlog(ML_ERROR,"during assert master of %.*s to %u, "
1806 "got %d.\n", namelen, lockname, to, r);
Kurt Hackela9ee4c82006-04-27 19:26:15 -07001807 spin_lock(&dlm->spinlock);
1808 spin_lock(&dlm->master_lock);
1809 if (dlm_find_mle(dlm, &mle, (char *)lockname,
1810 namelen)) {
1811 dlm_print_one_mle(mle);
1812 __dlm_put_mle(mle);
1813 }
1814 spin_unlock(&dlm->master_lock);
1815 spin_unlock(&dlm->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001816 BUG();
Kurt Hackelba2bf212006-12-01 14:47:20 -08001817 }
1818
1819 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1820 !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1821 mlog(ML_ERROR, "%.*s: very strange, "
1822 "master MLE but no lockres on %u\n",
1823 namelen, lockname, to);
1824 }
1825
1826 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001827 mlog(0, "%.*s: node %u created mles on other "
1828 "nodes and requests a re-assert\n",
1829 namelen, lockname, to);
1830 reassert = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001831 }
Kurt Hackelba2bf212006-12-01 14:47:20 -08001832 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1833 mlog(0, "%.*s: node %u has a reference to this "
1834 "lockres, set the bit in the refmap\n",
1835 namelen, lockname, to);
1836 spin_lock(&res->spinlock);
1837 dlm_lockres_set_refmap_bit(to, res);
1838 spin_unlock(&res->spinlock);
1839 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001840 }
1841
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001842 if (reassert)
1843 goto again;
1844
Sunil Mushranf3f85462007-01-29 15:19:16 -08001845 spin_lock(&res->spinlock);
1846 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1847 spin_unlock(&res->spinlock);
1848 wake_up(&res->wq);
1849
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001850 return ret;
1851}
1852
1853/*
1854 * locks that can be taken here:
1855 * dlm->spinlock
1856 * res->spinlock
1857 * mle->spinlock
1858 * dlm->master_list
1859 *
1860 * if possible, TRIM THIS DOWN!!!
1861 */
Kurt Hackeld74c9802007-01-17 17:04:25 -08001862int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1863 void **ret_data)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001864{
1865 struct dlm_ctxt *dlm = data;
1866 struct dlm_master_list_entry *mle = NULL;
1867 struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1868 struct dlm_lock_resource *res = NULL;
1869 char *name;
Mark Fasheha3d33292006-03-09 17:55:56 -08001870 unsigned int namelen, hash;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001871 u32 flags;
Kurt Hackelba2bf212006-12-01 14:47:20 -08001872 int master_request = 0, have_lockres_ref = 0;
Kurt Hackel9c6510a2006-03-02 18:09:26 -08001873 int ret = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001874
1875 if (!dlm_grab(dlm))
1876 return 0;
1877
1878 name = assert->name;
1879 namelen = assert->namelen;
Mark Fasheha3d33292006-03-09 17:55:56 -08001880 hash = dlm_lockid_hash(name, namelen);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001881 flags = be32_to_cpu(assert->flags);
1882
1883 if (namelen > DLM_LOCKID_NAME_MAX) {
1884 mlog(ML_ERROR, "Invalid name length!");
1885 goto done;
1886 }
1887
1888 spin_lock(&dlm->spinlock);
1889
1890 if (flags)
1891 mlog(0, "assert_master with flags: %u\n", flags);
1892
1893 /* find the MLE */
1894 spin_lock(&dlm->master_lock);
1895 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1896 /* not an error, could be master just re-asserting */
1897 mlog(0, "just got an assert_master from %u, but no "
1898 "MLE for it! (%.*s)\n", assert->node_idx,
1899 namelen, name);
1900 } else {
 1901 int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1902 if (bit >= O2NM_MAX_NODES) {
1903 /* not necessarily an error, though less likely.
1904 * could be master just re-asserting. */
Kurt Hackelaa852352006-04-27 19:04:49 -07001905 mlog(0, "no bits set in the maybe_map, but %u "
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001906 "is asserting! (%.*s)\n", assert->node_idx,
1907 namelen, name);
1908 } else if (bit != assert->node_idx) {
1909 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1910 mlog(0, "master %u was found, %u should "
1911 "back off\n", assert->node_idx, bit);
1912 } else {
1913 /* with the fix for bug 569, a higher node
1914 * number winning the mastery will respond
1915 * YES to mastery requests, but this node
1916 * had no way of knowing. let it pass. */
Kurt Hackelaa852352006-04-27 19:04:49 -07001917 mlog(0, "%u is the lowest node, "
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001918 "%u is asserting. (%.*s) %u must "
1919 "have begun after %u won.\n", bit,
1920 assert->node_idx, namelen, name, bit,
1921 assert->node_idx);
1922 }
1923 }
Kurt Hackel2d1a8682006-04-27 19:01:35 -07001924 if (mle->type == DLM_MLE_MIGRATION) {
1925 if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1926 mlog(0, "%s:%.*s: got cleanup assert"
1927 " from %u for migration\n",
1928 dlm->name, namelen, name,
1929 assert->node_idx);
1930 } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1931 mlog(0, "%s:%.*s: got unrelated assert"
1932 " from %u for migration, ignoring\n",
1933 dlm->name, namelen, name,
1934 assert->node_idx);
1935 __dlm_put_mle(mle);
1936 spin_unlock(&dlm->master_lock);
1937 spin_unlock(&dlm->spinlock);
1938 goto done;
1939 }
1940 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001941 }
1942 spin_unlock(&dlm->master_lock);
1943
1944 /* ok everything checks out with the MLE
1945 * now check to see if there is a lockres */
Mark Fasheha3d33292006-03-09 17:55:56 -08001946 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001947 if (res) {
1948 spin_lock(&res->spinlock);
1949 if (res->state & DLM_LOCK_RES_RECOVERING) {
1950 mlog(ML_ERROR, "%u asserting but %.*s is "
1951 "RECOVERING!\n", assert->node_idx, namelen, name);
1952 goto kill;
1953 }
1954 if (!mle) {
Kurt Hackeldc2ed192006-04-27 19:03:18 -07001955 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1956 res->owner != assert->node_idx) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08001957 mlog(ML_ERROR, "assert_master from "
1958 "%u, but current owner is "
1959 "%u! (%.*s)\n",
1960 assert->node_idx, res->owner,
1961 namelen, name);
1962 goto kill;
1963 }
1964 } else if (mle->type != DLM_MLE_MIGRATION) {
1965 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1966 /* owner is just re-asserting */
1967 if (res->owner == assert->node_idx) {
1968 mlog(0, "owner %u re-asserting on "
1969 "lock %.*s\n", assert->node_idx,
1970 namelen, name);
1971 goto ok;
1972 }
1973 mlog(ML_ERROR, "got assert_master from "
1974 "node %u, but %u is the owner! "
1975 "(%.*s)\n", assert->node_idx,
1976 res->owner, namelen, name);
1977 goto kill;
1978 }
1979 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1980 mlog(ML_ERROR, "got assert from %u, but lock "
1981 "with no owner should be "
1982 "in-progress! (%.*s)\n",
1983 assert->node_idx,
1984 namelen, name);
1985 goto kill;
1986 }
1987 } else /* mle->type == DLM_MLE_MIGRATION */ {
1988 /* should only be getting an assert from new master */
1989 if (assert->node_idx != mle->new_master) {
1990 mlog(ML_ERROR, "got assert from %u, but "
1991 "new master is %u, and old master "
1992 "was %u (%.*s)\n",
1993 assert->node_idx, mle->new_master,
1994 mle->master, namelen, name);
1995 goto kill;
1996 }
1997
1998 }
1999ok:
2000 spin_unlock(&res->spinlock);
2001 }
2002 spin_unlock(&dlm->spinlock);
2003
2004 // mlog(0, "woo! got an assert_master from node %u!\n",
2005 // assert->node_idx);
2006 if (mle) {
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002007 int extra_ref = 0;
2008 int nn = -1;
Kurt Hackela2bf0472006-04-27 18:51:26 -07002009 int rr, err = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002010
2011 spin_lock(&mle->spinlock);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002012 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
2013 extra_ref = 1;
2014 else {
2015 /* MASTER mle: if any bits set in the response map
2016 * then the calling node needs to re-assert to clear
2017 * up nodes that this node contacted */
 2018 while ((nn = find_next_bit(mle->response_map, O2NM_MAX_NODES,
2019 nn+1)) < O2NM_MAX_NODES) {
2020 if (nn != dlm->node_num && nn != assert->node_idx)
2021 master_request = 1;
2022 }
2023 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002024 mle->master = assert->node_idx;
2025 atomic_set(&mle->woken, 1);
2026 wake_up(&mle->wq);
2027 spin_unlock(&mle->spinlock);
2028
Kurt Hackela2bf0472006-04-27 18:51:26 -07002029 if (res) {
Kurt Hackela6fa3642007-01-17 14:59:12 -08002030 int wake = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002031 spin_lock(&res->spinlock);
Kurt Hackela2bf0472006-04-27 18:51:26 -07002032 if (mle->type == DLM_MLE_MIGRATION) {
2033 mlog(0, "finishing off migration of lockres %.*s, "
2034 "from %u to %u\n",
2035 res->lockname.len, res->lockname.name,
2036 dlm->node_num, mle->new_master);
2037 res->state &= ~DLM_LOCK_RES_MIGRATING;
Kurt Hackela6fa3642007-01-17 14:59:12 -08002038 wake = 1;
Kurt Hackela2bf0472006-04-27 18:51:26 -07002039 dlm_change_lockres_owner(dlm, res, mle->new_master);
2040 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
2041 } else {
2042 dlm_change_lockres_owner(dlm, res, mle->master);
2043 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002044 spin_unlock(&res->spinlock);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002045 have_lockres_ref = 1;
Kurt Hackela6fa3642007-01-17 14:59:12 -08002046 if (wake)
2047 wake_up(&res->wq);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002048 }
Kurt Hackela2bf0472006-04-27 18:51:26 -07002049
2050 /* master is known, detach if not already detached.
2051 * ensures that only one assert_master call will happen
2052 * on this mle. */
2053 spin_lock(&dlm->spinlock);
2054 spin_lock(&dlm->master_lock);
2055
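		/* sanity-check the ref count: expect at least one ref from the
		 * dlm_find_mle() above, one more if the mle is marked inuse,
		 * and one more for the extra ref that block and migration
		 * mles carry */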
2056 rr = atomic_read(&mle->mle_refs.refcount);
2057 if (mle->inuse > 0) {
2058 if (extra_ref && rr < 3)
2059 err = 1;
2060 else if (!extra_ref && rr < 2)
2061 err = 1;
2062 } else {
2063 if (extra_ref && rr < 2)
2064 err = 1;
2065 else if (!extra_ref && rr < 1)
2066 err = 1;
2067 }
2068 if (err) {
2069 mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
2070 "that will mess up this node, refs=%d, extra=%d, "
2071 "inuse=%d\n", dlm->name, namelen, name,
2072 assert->node_idx, rr, extra_ref, mle->inuse);
2073 dlm_print_one_mle(mle);
2074 }
2075 list_del_init(&mle->list);
2076 __dlm_mle_detach_hb_events(dlm, mle);
2077 __dlm_put_mle(mle);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002078 if (extra_ref) {
2079 /* the assert master message now balances the extra
2080 * ref given by the master / migration request message.
2081 * if this is the last put, it will be removed
2082 * from the list. */
Kurt Hackela2bf0472006-04-27 18:51:26 -07002083 __dlm_put_mle(mle);
2084 }
2085 spin_unlock(&dlm->master_lock);
2086 spin_unlock(&dlm->spinlock);
2087 } else if (res) {
2088 if (res->owner != assert->node_idx) {
2089 mlog(0, "assert_master from %u, but current "
2090 "owner is %u (%.*s), no mle\n", assert->node_idx,
2091 res->owner, namelen, name);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002092 }
2093 }
2094
2095done:
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002096 ret = 0;
Kurt Hackel3b8118c2007-01-17 17:05:53 -08002097 if (res) {
2098 spin_lock(&res->spinlock);
2099 res->state |= DLM_LOCK_RES_SETREF_INPROG;
2100 spin_unlock(&res->spinlock);
2101 *ret_data = (void *)res;
2102 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002103 dlm_put(dlm);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002104 if (master_request) {
2105 mlog(0, "need to tell master to reassert\n");
Kurt Hackelba2bf212006-12-01 14:47:20 -08002106 /* positive. negative would shoot down the node. */
2107 ret |= DLM_ASSERT_RESPONSE_REASSERT;
2108 if (!have_lockres_ref) {
2109 mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2110 "mle present here for %s:%.*s, but no lockres!\n",
2111 assert->node_idx, dlm->name, namelen, name);
2112 }
2113 }
2114 if (have_lockres_ref) {
2115 /* let the master know we have a reference to the lockres */
2116 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2117 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2118 dlm->name, namelen, name, assert->node_idx);
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002119 }
2120 return ret;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002121
2122kill:
2123 /* kill the caller! */
Kurt Hackela9ee4c82006-04-27 19:26:15 -07002124 mlog(ML_ERROR, "Bad message received from another node. Dumping state "
2125 "and killing the other node now! This node is OK and can continue.\n");
2126 __dlm_print_one_lock_resource(res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002127 spin_unlock(&res->spinlock);
2128 spin_unlock(&dlm->spinlock);
Kurt Hackel3b8118c2007-01-17 17:05:53 -08002129 *ret_data = (void *)res;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002130 dlm_put(dlm);
2131 return -EINVAL;
2132}
2133
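/* o2net runs this post handler after the status from
 * dlm_assert_master_handler() has been sent back to the asserting node;
 * only then is it safe to clear SETREF_INPROG and drop the lockres
 * reference the handler stashed in ret_data */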
Kurt Hackel3b8118c2007-01-17 17:05:53 -08002134void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2135{
2136 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2137
2138 if (ret_data) {
2139 spin_lock(&res->spinlock);
2140 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2141 spin_unlock(&res->spinlock);
2142 wake_up(&res->wq);
2143 dlm_lockres_put(res);
2144 }
2145 return;
2146}
2147
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002148int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2149 struct dlm_lock_resource *res,
2150 int ignore_higher, u8 request_from, u32 flags)
2151{
2152 struct dlm_work_item *item;
Robert P. J. Daycd861282006-12-13 00:34:52 -08002153 item = kzalloc(sizeof(*item), GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002154 if (!item)
2155 return -ENOMEM;
2156
2157
2158 /* queue up work for dlm_assert_master_worker */
2159 dlm_grab(dlm); /* get an extra ref for the work item */
2160 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2161 item->u.am.lockres = res; /* already have a ref */
2162 /* can optionally ignore node numbers higher than this node */
2163 item->u.am.ignore_higher = ignore_higher;
2164 item->u.am.request_from = request_from;
2165 item->u.am.flags = flags;
2166
Kurt Hackel9c6510a2006-03-02 18:09:26 -08002167 if (ignore_higher)
2168 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2169 res->lockname.name);
2170
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002171 spin_lock(&dlm->work_lock);
2172 list_add_tail(&item->list, &dlm->work_list);
2173 spin_unlock(&dlm->work_lock);
2174
Kurt Hackel3156d262006-05-01 14:39:29 -07002175 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002176 return 0;
2177}
2178
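/* runs in dlm->dlm_worker context on behalf of
 * dlm_dispatch_assert_master(): builds the nodemap to assert against
 * and calls dlm_do_assert_master() without holding any spinlocks */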
2179static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2180{
2181 struct dlm_ctxt *dlm = data;
2182 int ret = 0;
2183 struct dlm_lock_resource *res;
2184 unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2185 int ignore_higher;
2186 int bit;
2187 u8 request_from;
2188 u32 flags;
2189
2190 dlm = item->dlm;
2191 res = item->u.am.lockres;
2192 ignore_higher = item->u.am.ignore_higher;
2193 request_from = item->u.am.request_from;
2194 flags = item->u.am.flags;
2195
2196 spin_lock(&dlm->spinlock);
2197 memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2198 spin_unlock(&dlm->spinlock);
2199
2200 clear_bit(dlm->node_num, nodemap);
2201 if (ignore_higher) {
 2202 /* if this is just to clear up mles for nodes below
2203 * this node, do not send the message to the original
2204 * caller or any node number higher than this */
2205 clear_bit(request_from, nodemap);
2206 bit = dlm->node_num;
2207 while (1) {
2208 bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2209 bit+1);
2210 if (bit >= O2NM_MAX_NODES)
2211 break;
2212 clear_bit(bit, nodemap);
2213 }
2214 }
2215
Kurt Hackel36407482006-05-01 13:32:27 -07002216 /*
2217 * If we're migrating this lock to someone else, we are no
 2218 * longer allowed to assert our own mastery. OTOH, we need to
2219 * prevent migration from starting while we're still asserting
2220 * our dominance. The reserved ast delays migration.
2221 */
2222 spin_lock(&res->spinlock);
2223 if (res->state & DLM_LOCK_RES_MIGRATING) {
2224 mlog(0, "Someone asked us to assert mastery, but we're "
2225 "in the middle of migration. Skipping assert, "
2226 "the new master will handle that.\n");
2227 spin_unlock(&res->spinlock);
2228 goto put;
2229 } else
2230 __dlm_lockres_reserve_ast(res);
2231 spin_unlock(&res->spinlock);
2232
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002233 /* this call now finishes out the nodemap
2234 * even if one or more nodes die */
2235 mlog(0, "worker about to master %.*s here, this=%u\n",
2236 res->lockname.len, res->lockname.name, dlm->node_num);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002237 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002238 if (ret < 0) {
2239 /* no need to restart, we are done */
Kurt Hackel3b3b84a2006-05-01 14:31:37 -07002240 if (!dlm_is_host_down(ret))
2241 mlog_errno(ret);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002242 }
2243
Kurt Hackel36407482006-05-01 13:32:27 -07002244 /* Ok, we've asserted ourselves. Let's let migration start. */
2245 dlm_lockres_release_ast(dlm, res);
2246
2247put:
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002248 dlm_lockres_put(res);
2249
2250 mlog(0, "finished with dlm_assert_master_worker\n");
2251}
2252
Kurt Hackelc03872f2006-03-06 14:08:49 -08002253/* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2254 * We cannot wait for node recovery to complete to begin mastering this
2255 * lockres because this lockres is used to kick off recovery! ;-)
2256 * So, do a pre-check on all living nodes to see if any of those nodes
2257 * think that $RECOVERY is currently mastered by a dead node. If so,
2258 * we wait a short time to allow that node to get notified by its own
2259 * heartbeat stack, then check again. All $RECOVERY lock resources
 2260 * mastered by dead nodes are purged when the heartbeat callback is
2261 * fired, so we can know for sure that it is safe to continue once
2262 * the node returns a live node or no node. */
2263static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2264 struct dlm_lock_resource *res)
2265{
2266 struct dlm_node_iter iter;
2267 int nodenum;
2268 int ret = 0;
2269 u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2270
2271 spin_lock(&dlm->spinlock);
2272 dlm_node_iter_init(dlm->domain_map, &iter);
2273 spin_unlock(&dlm->spinlock);
2274
2275 while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2276 /* do not send to self */
2277 if (nodenum == dlm->node_num)
2278 continue;
2279 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2280 if (ret < 0) {
2281 mlog_errno(ret);
2282 if (!dlm_is_host_down(ret))
2283 BUG();
2284 /* host is down, so answer for that node would be
2285 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
Kurt Hackelf42a1002006-05-01 11:53:33 -07002286 ret = 0;
Kurt Hackelc03872f2006-03-06 14:08:49 -08002287 }
2288
2289 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2290 /* check to see if this master is in the recovery map */
2291 spin_lock(&dlm->spinlock);
2292 if (test_bit(master, dlm->recovery_map)) {
2293 mlog(ML_NOTICE, "%s: node %u has not seen "
2294 "node %u go down yet, and thinks the "
2295 "dead node is mastering the recovery "
2296 "lock. must wait.\n", dlm->name,
2297 nodenum, master);
2298 ret = -EAGAIN;
2299 }
2300 spin_unlock(&dlm->spinlock);
2301 mlog(0, "%s: reco lock master is %u\n", dlm->name,
2302 master);
2303 break;
2304 }
2305 }
2306 return ret;
2307}
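/* a sketch of the retry pattern a caller might use (hypothetical; the
 * real call site lives in the recovery path):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 *
 * i.e. keep polling until no live node still names a dead node as the
 * $RECOVERY master */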
2308
Kurt Hackelba2bf212006-12-01 14:47:20 -08002309/*
2310 * DLM_DEREF_LOCKRES_MSG
2311 */
2312
2313int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2314{
2315 struct dlm_deref_lockres deref;
2316 int ret = 0, r;
2317 const char *lockname;
2318 unsigned int namelen;
2319
2320 lockname = res->lockname.name;
2321 namelen = res->lockname.len;
2322 BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2323
2324 mlog(0, "%s:%.*s: sending deref to %d\n",
2325 dlm->name, namelen, lockname, res->owner);
2326 memset(&deref, 0, sizeof(deref));
2327 deref.node_idx = dlm->node_num;
2328 deref.namelen = namelen;
2329 memcpy(deref.name, lockname, namelen);
2330
2331 ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2332 &deref, sizeof(deref), res->owner, &r);
2333 if (ret < 0)
2334 mlog_errno(ret);
2335 else if (r < 0) {
2336 /* BAD. other node says I did not have a ref. */
2337 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2338 "(master=%u) got %d.\n", dlm->name, namelen,
2339 lockname, res->owner, r);
2340 dlm_print_one_lock_resource(res);
2341 BUG();
2342 }
2343 return ret;
2344}
2345
Kurt Hackeld74c9802007-01-17 17:04:25 -08002346int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2347 void **ret_data)
Kurt Hackelba2bf212006-12-01 14:47:20 -08002348{
2349 struct dlm_ctxt *dlm = data;
2350 struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2351 struct dlm_lock_resource *res = NULL;
2352 char *name;
2353 unsigned int namelen;
2354 int ret = -EINVAL;
2355 u8 node;
2356 unsigned int hash;
Sunil Mushranf3f85462007-01-29 15:19:16 -08002357 struct dlm_work_item *item;
2358 int cleared = 0;
2359 int dispatch = 0;
Kurt Hackelba2bf212006-12-01 14:47:20 -08002360
2361 if (!dlm_grab(dlm))
2362 return 0;
2363
2364 name = deref->name;
2365 namelen = deref->namelen;
2366 node = deref->node_idx;
2367
2368 if (namelen > DLM_LOCKID_NAME_MAX) {
2369 mlog(ML_ERROR, "Invalid name length!");
2370 goto done;
2371 }
2372 if (deref->node_idx >= O2NM_MAX_NODES) {
2373 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2374 goto done;
2375 }
2376
2377 hash = dlm_lockid_hash(name, namelen);
2378
2379 spin_lock(&dlm->spinlock);
2380 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2381 if (!res) {
2382 spin_unlock(&dlm->spinlock);
2383 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2384 dlm->name, namelen, name);
2385 goto done;
2386 }
2387 spin_unlock(&dlm->spinlock);
2388
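	/* if an assert_master for this lockres is still in flight
	 * (SETREF_INPROG), the refmap bit cannot be cleared yet: hand the
	 * deref off to the worker, which waits for the flag to drop;
	 * otherwise clear the bit inline */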
2389 spin_lock(&res->spinlock);
Sunil Mushranf3f85462007-01-29 15:19:16 -08002390 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2391 dispatch = 1;
2392 else {
2393 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2394 if (test_bit(node, res->refmap)) {
2395 dlm_lockres_clear_refmap_bit(node, res);
2396 cleared = 1;
2397 }
Kurt Hackelba2bf212006-12-01 14:47:20 -08002398 }
2399 spin_unlock(&res->spinlock);
2400
Sunil Mushranf3f85462007-01-29 15:19:16 -08002401 if (!dispatch) {
2402 if (cleared)
2403 dlm_lockres_calc_usage(dlm, res);
2404 else {
2405 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2406 "but it is already dropped!\n", dlm->name,
2407 res->lockname.len, res->lockname.name, node);
Tao Ma2af37ce2008-02-28 10:41:55 +08002408 dlm_print_one_lock_resource(res);
Sunil Mushranf3f85462007-01-29 15:19:16 -08002409 }
2410 ret = 0;
2411 goto done;
2412 }
2413
2414 item = kzalloc(sizeof(*item), GFP_NOFS);
2415 if (!item) {
2416 ret = -ENOMEM;
2417 mlog_errno(ret);
2418 goto done;
2419 }
2420
2421 dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2422 item->u.dl.deref_res = res;
2423 item->u.dl.deref_node = node;
2424
2425 spin_lock(&dlm->work_lock);
2426 list_add_tail(&item->list, &dlm->work_list);
2427 spin_unlock(&dlm->work_lock);
2428
2429 queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2430 return 0;
2431
Kurt Hackelba2bf212006-12-01 14:47:20 -08002432done:
2433 if (res)
2434 dlm_lockres_put(res);
2435 dlm_put(dlm);
Sunil Mushranf3f85462007-01-29 15:19:16 -08002436
Kurt Hackelba2bf212006-12-01 14:47:20 -08002437 return ret;
2438}
2439
Sunil Mushranf3f85462007-01-29 15:19:16 -08002440static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2441{
2442 struct dlm_ctxt *dlm;
2443 struct dlm_lock_resource *res;
2444 u8 node;
2445 u8 cleared = 0;
2446
2447 dlm = item->dlm;
2448 res = item->u.dl.deref_res;
2449 node = item->u.dl.deref_node;
2450
2451 spin_lock(&res->spinlock);
2452 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2453 if (test_bit(node, res->refmap)) {
2454 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2455 dlm_lockres_clear_refmap_bit(node, res);
2456 cleared = 1;
2457 }
2458 spin_unlock(&res->spinlock);
2459
2460 if (cleared) {
2461 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2462 dlm->name, res->lockname.len, res->lockname.name, node);
2463 dlm_lockres_calc_usage(dlm, res);
2464 } else {
2465 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2466 "but it is already dropped!\n", dlm->name,
2467 res->lockname.len, res->lockname.name, node);
Tao Ma2af37ce2008-02-28 10:41:55 +08002468 dlm_print_one_lock_resource(res);
Sunil Mushranf3f85462007-01-29 15:19:16 -08002469 }
2470
2471 dlm_lockres_put(res);
2472}
2473
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002474/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2475 * if not. If 0, numlocks is set to the number of locks in the lockres.
2476 */
2477static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2478 struct dlm_lock_resource *res,
2479 int *numlocks)
2480{
2481 int ret;
2482 int i;
2483 int count = 0;
Christoph Hellwig800deef2007-05-17 16:03:13 +02002484 struct list_head *queue;
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002485 struct dlm_lock *lock;
2486
2487 assert_spin_locked(&res->spinlock);
2488
2489 ret = -EINVAL;
2490 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2491 mlog(0, "cannot migrate lockres with unknown owner!\n");
2492 goto leave;
2493 }
2494
2495 if (res->owner != dlm->node_num) {
2496 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2497 goto leave;
2498 }
2499
2500 ret = 0;
2501 queue = &res->granted;
2502 for (i = 0; i < 3; i++) {
Christoph Hellwig800deef2007-05-17 16:03:13 +02002503 list_for_each_entry(lock, queue, list) {
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002504 ++count;
2505 if (lock->ml.node == dlm->node_num) {
2506 mlog(0, "found a lock owned by this node still "
2507 "on the %s queue! will not migrate this "
2508 "lockres\n", (i == 0 ? "granted" :
2509 (i == 1 ? "converting" :
2510 "blocked")));
2511 ret = -ENOTEMPTY;
2512 goto leave;
2513 }
2514 }
2515 queue++;
2516 }
2517
2518 *numlocks = count;
2519 mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2520
2521leave:
2522 return ret;
2523}
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002524
2525/*
2526 * DLM_MIGRATE_LOCKRES
2527 */
2528
2529
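/* overview of the steps below: verify the lockres is migrateable,
 * preallocate the migratable lockres page and a migration mle, pick a
 * live target, register the mle, set MIGRATING and flush asts, ship the
 * lock state with DLM_MRES_MIGRATION, then wait for the target's
 * assert_master before handing over ownership and dropping the
 * nonlocal locks */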
Adrian Bunkfaf0ec92006-12-14 00:17:32 +01002530static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2531 struct dlm_lock_resource *res,
2532 u8 target)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002533{
2534 struct dlm_master_list_entry *mle = NULL;
2535 struct dlm_master_list_entry *oldmle = NULL;
2536 struct dlm_migratable_lockres *mres = NULL;
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002537 int ret = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002538 const char *name;
2539 unsigned int namelen;
2540 int mle_added = 0;
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002541 int numlocks;
2542 int wake = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002543
2544 if (!dlm_grab(dlm))
2545 return -EINVAL;
2546
2547 name = res->lockname.name;
2548 namelen = res->lockname.len;
2549
2550 mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2551
2552 /*
2553 * ensure this lockres is a proper candidate for migration
2554 */
2555 spin_lock(&res->spinlock);
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002556 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2557 if (ret < 0) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002558 spin_unlock(&res->spinlock);
2559 goto leave;
2560 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002561 spin_unlock(&res->spinlock);
2562
2563 /* no work to do */
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002564 if (numlocks == 0) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002565 mlog(0, "no locks were found on this lockres! done!\n");
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002566 goto leave;
2567 }
2568
2569 /*
2570 * preallocate up front
2571 * if this fails, abort
2572 */
2573
2574 ret = -ENOMEM;
Kurt Hackelad8100e2006-05-01 14:25:21 -07002575 mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002576 if (!mres) {
2577 mlog_errno(ret);
2578 goto leave;
2579 }
2580
2581 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
Kurt Hackelad8100e2006-05-01 14:25:21 -07002582 GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002583 if (!mle) {
2584 mlog_errno(ret);
2585 goto leave;
2586 }
2587 ret = 0;
2588
2589 /*
2590 * find a node to migrate the lockres to
2591 */
2592
2593 mlog(0, "picking a migration node\n");
2594 spin_lock(&dlm->spinlock);
2595 /* pick a new node */
 2596 if (target >= O2NM_MAX_NODES ||
 2597 !test_bit(target, dlm->domain_map)) {
2598 target = dlm_pick_migration_target(dlm, res);
2599 }
2600 mlog(0, "node %u chosen for migration\n", target);
2601
2602 if (target >= O2NM_MAX_NODES ||
2603 !test_bit(target, dlm->domain_map)) {
2604 /* target chosen is not alive */
2605 ret = -EINVAL;
2606 }
2607
2608 if (ret) {
2609 spin_unlock(&dlm->spinlock);
2610 goto fail;
2611 }
2612
2613 mlog(0, "continuing with target = %u\n", target);
2614
2615 /*
2616 * clear any existing master requests and
2617 * add the migration mle to the list
2618 */
2619 spin_lock(&dlm->master_lock);
2620 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2621 namelen, target, dlm->node_num);
2622 spin_unlock(&dlm->master_lock);
2623 spin_unlock(&dlm->spinlock);
2624
2625 if (ret == -EEXIST) {
2626 mlog(0, "another process is already migrating it\n");
2627 goto fail;
2628 }
2629 mle_added = 1;
2630
2631 /*
2632 * set the MIGRATING flag and flush asts
2633 * if we fail after this we need to re-dirty the lockres
2634 */
2635 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2636 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2637 "the target went down.\n", res->lockname.len,
2638 res->lockname.name, target);
2639 spin_lock(&res->spinlock);
2640 res->state &= ~DLM_LOCK_RES_MIGRATING;
Kurt Hackela6fa3642007-01-17 14:59:12 -08002641 wake = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002642 spin_unlock(&res->spinlock);
2643 ret = -EINVAL;
2644 }
2645
2646fail:
2647 if (oldmle) {
2648 /* master is known, detach if not already detached */
2649 dlm_mle_detach_hb_events(dlm, oldmle);
2650 dlm_put_mle(oldmle);
2651 }
2652
2653 if (ret < 0) {
2654 if (mle_added) {
2655 dlm_mle_detach_hb_events(dlm, mle);
2656 dlm_put_mle(mle);
2657 } else if (mle) {
2658 kmem_cache_free(dlm_mle_cache, mle);
2659 }
2660 goto leave;
2661 }
2662
2663 /*
2664 * at this point, we have a migration target, an mle
2665 * in the master list, and the MIGRATING flag set on
2666 * the lockres
2667 */
2668
Kurt Hackel1cd04db2007-01-17 14:53:37 -08002669 /* now that remote nodes are spinning on the MIGRATING flag,
2670 * ensure that all assert_master work is flushed. */
2671 flush_workqueue(dlm->dlm_worker);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002672
2673 /* get an extra reference on the mle.
2674 * otherwise the assert_master from the new
2675 * master will destroy this.
2676 * also, make sure that all callers of dlm_get_mle
2677 * take both dlm->spinlock and dlm->master_lock */
2678 spin_lock(&dlm->spinlock);
2679 spin_lock(&dlm->master_lock);
Kurt Hackela2bf0472006-04-27 18:51:26 -07002680 dlm_get_mle_inuse(mle);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002681 spin_unlock(&dlm->master_lock);
2682 spin_unlock(&dlm->spinlock);
2683
2684 /* notify new node and send all lock state */
2685 /* call send_one_lockres with migration flag.
2686 * this serves as notice to the target node that a
2687 * migration is starting. */
2688 ret = dlm_send_one_lockres(dlm, res, mres, target,
2689 DLM_MRES_MIGRATION);
2690
2691 if (ret < 0) {
2692 mlog(0, "migration to node %u failed with %d\n",
2693 target, ret);
2694 /* migration failed, detach and clean up mle */
2695 dlm_mle_detach_hb_events(dlm, mle);
2696 dlm_put_mle(mle);
Kurt Hackela2bf0472006-04-27 18:51:26 -07002697 dlm_put_mle_inuse(mle);
2698 spin_lock(&res->spinlock);
2699 res->state &= ~DLM_LOCK_RES_MIGRATING;
Kurt Hackela6fa3642007-01-17 14:59:12 -08002700 wake = 1;
Kurt Hackela2bf0472006-04-27 18:51:26 -07002701 spin_unlock(&res->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002702 goto leave;
2703 }
2704
2705 /* at this point, the target sends a message to all nodes,
2706 * (using dlm_do_migrate_request). this node is skipped since
2707 * we had to put an mle in the list to begin the process. this
2708 * node now waits for target to do an assert master. this node
2709 * will be the last one notified, ensuring that the migration
2710 * is complete everywhere. if the target dies while this is
2711 * going on, some nodes could potentially see the target as the
2712 * master, so it is important that my recovery finds the migration
 2713 * mle and sets the master to UNKNOWN. */
2714
2715
2716 /* wait for new node to assert master */
2717 while (1) {
2718 ret = wait_event_interruptible_timeout(mle->wq,
2719 (atomic_read(&mle->woken) == 1),
2720 msecs_to_jiffies(5000));
2721
2722 if (ret >= 0) {
2723 if (atomic_read(&mle->woken) == 1 ||
2724 res->owner == target)
2725 break;
2726
Kurt Hackel1cd04db2007-01-17 14:53:37 -08002727 mlog(0, "%s:%.*s: timed out during migration\n",
2728 dlm->name, res->lockname.len, res->lockname.name);
Kurt Hackele2faea42006-01-12 14:24:55 -08002729 /* avoid hang during shutdown when migrating lockres
2730 * to a node which also goes down */
2731 if (dlm_is_node_dead(dlm, target)) {
Kurt Hackelaa852352006-04-27 19:04:49 -07002732 mlog(0, "%s:%.*s: expected migration "
2733 "target %u is no longer up, restarting\n",
Kurt Hackele2faea42006-01-12 14:24:55 -08002734 dlm->name, res->lockname.len,
2735 res->lockname.name, target);
Kurt Hackel1cd04db2007-01-17 14:53:37 -08002736 ret = -EINVAL;
2737 /* migration failed, detach and clean up mle */
2738 dlm_mle_detach_hb_events(dlm, mle);
2739 dlm_put_mle(mle);
2740 dlm_put_mle_inuse(mle);
2741 spin_lock(&res->spinlock);
2742 res->state &= ~DLM_LOCK_RES_MIGRATING;
Kurt Hackela6fa3642007-01-17 14:59:12 -08002743 wake = 1;
Kurt Hackel1cd04db2007-01-17 14:53:37 -08002744 spin_unlock(&res->spinlock);
2745 goto leave;
Kurt Hackele2faea42006-01-12 14:24:55 -08002746 }
Kurt Hackel1cd04db2007-01-17 14:53:37 -08002747 } else
2748 mlog(0, "%s:%.*s: caught signal during migration\n",
2749 dlm->name, res->lockname.len, res->lockname.name);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002750 }
2751
2752 /* all done, set the owner, clear the flag */
2753 spin_lock(&res->spinlock);
2754 dlm_set_lockres_owner(dlm, res, target);
2755 res->state &= ~DLM_LOCK_RES_MIGRATING;
2756 dlm_remove_nonlocal_locks(dlm, res);
2757 spin_unlock(&res->spinlock);
2758 wake_up(&res->wq);
2759
2760 /* master is known, detach if not already detached */
2761 dlm_mle_detach_hb_events(dlm, mle);
Kurt Hackela2bf0472006-04-27 18:51:26 -07002762 dlm_put_mle_inuse(mle);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002763 ret = 0;
2764
2765 dlm_lockres_calc_usage(dlm, res);
2766
2767leave:
2768 /* re-dirty the lockres if we failed */
2769 if (ret < 0)
2770 dlm_kick_thread(dlm, res);
2771
Kurt Hackela6fa3642007-01-17 14:59:12 -08002772 /* wake up waiters if the MIGRATING flag got set
2773 * but migration failed */
2774 if (wake)
2775 wake_up(&res->wq);
2776
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002777 /* TODO: cleanup */
2778 if (mres)
2779 free_page((unsigned long)mres);
2780
2781 dlm_put(dlm);
2782
2783 mlog(0, "returning %d\n", ret);
2784 return ret;
2785}
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002786
Kurt Hackelba2bf212006-12-01 14:47:20 -08002787#define DLM_MIGRATION_RETRY_MS 100
2788
2789/* Should be called only after beginning the domain leave process.
2790 * There should not be any remaining locks on nonlocal lock resources,
2791 * and there should be no local locks left on locally mastered resources.
2792 *
2793 * Called with the dlm spinlock held, may drop it to do migration, but
2794 * will re-acquire before exit.
2795 *
2796 * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2797int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2798{
2799 int ret;
2800 int lock_dropped = 0;
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002801 int numlocks;
Kurt Hackelba2bf212006-12-01 14:47:20 -08002802
Sunil Mushranb36c3f82007-03-12 13:25:44 -07002803 spin_lock(&res->spinlock);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002804 if (res->owner != dlm->node_num) {
2805 if (!__dlm_lockres_unused(res)) {
2806 mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2807 "trying to free this but locks remain\n",
2808 dlm->name, res->lockname.len, res->lockname.name);
2809 }
Sunil Mushranb36c3f82007-03-12 13:25:44 -07002810 spin_unlock(&res->spinlock);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002811 goto leave;
2812 }
Sunil Mushran2f5bf1f2007-03-22 17:08:32 -07002813
2814 /* No need to migrate a lockres having no locks */
2815 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2816 if (ret >= 0 && numlocks == 0) {
2817 spin_unlock(&res->spinlock);
2818 goto leave;
2819 }
Sunil Mushranb36c3f82007-03-12 13:25:44 -07002820 spin_unlock(&res->spinlock);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002821
2822 /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2823 spin_unlock(&dlm->spinlock);
2824 lock_dropped = 1;
2825 while (1) {
2826 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2827 if (ret >= 0)
2828 break;
2829 if (ret == -ENOTEMPTY) {
2830 mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2831 res->lockname.len, res->lockname.name);
2832 BUG();
2833 }
2834
2835 mlog(0, "lockres %.*s: migrate failed, "
2836 "retrying\n", res->lockname.len,
2837 res->lockname.name);
2838 msleep(DLM_MIGRATION_RETRY_MS);
2839 }
2840 spin_lock(&dlm->spinlock);
2841leave:
2842 return lock_dropped;
2843}
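/* a simplified sketch of the caller pattern used on domain leave (see
 * dlm_migrate_all_locks()): walk the lockres hash under dlm->spinlock
 * and restart the current bucket whenever this returns 1, since the
 * list may have changed while the lock was dropped */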
2844
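/* returns nonzero once this lock has no queued or pending basts; a
 * caller can wait_event() on dlm->ast_wq for this to become true (the
 * call sites live outside this file) */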
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002845int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2846{
2847 int ret;
2848 spin_lock(&dlm->ast_lock);
2849 spin_lock(&lock->spinlock);
2850 ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2851 spin_unlock(&lock->spinlock);
2852 spin_unlock(&dlm->ast_lock);
2853 return ret;
2854}
2855
2856static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2857 struct dlm_lock_resource *res,
2858 u8 mig_target)
2859{
2860 int can_proceed;
2861 spin_lock(&res->spinlock);
2862 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2863 spin_unlock(&res->spinlock);
2864
2865 /* target has died, so make the caller break out of the
2866 * wait_event, but caller must recheck the domain_map */
2867 spin_lock(&dlm->spinlock);
2868 if (!test_bit(mig_target, dlm->domain_map))
2869 can_proceed = 1;
2870 spin_unlock(&dlm->spinlock);
2871 return can_proceed;
2872}
2873
Adrian Bunkfaf0ec92006-12-14 00:17:32 +01002874static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2875 struct dlm_lock_resource *res)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002876{
2877 int ret;
2878 spin_lock(&res->spinlock);
2879 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2880 spin_unlock(&res->spinlock);
2881 return ret;
2882}
2883
2884
2885static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2886 struct dlm_lock_resource *res,
2887 u8 target)
2888{
2889 int ret = 0;
2890
2891 mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2892 res->lockname.len, res->lockname.name, dlm->node_num,
2893 target);
2894 /* need to set MIGRATING flag on lockres. this is done by
2895 * ensuring that all asts have been flushed for this lockres. */
2896 spin_lock(&res->spinlock);
2897 BUG_ON(res->migration_pending);
2898 res->migration_pending = 1;
2899 /* strategy is to reserve an extra ast then release
2900 * it below, letting the release do all of the work */
2901 __dlm_lockres_reserve_ast(res);
2902 spin_unlock(&res->spinlock);
2903
Kurt Hackelddc09c82007-01-05 15:00:17 -08002904 /* now flush all the pending asts */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002905 dlm_kick_thread(dlm, res);
Kurt Hackelddc09c82007-01-05 15:00:17 -08002906 /* before waiting on DIRTY, block processes which may
2907 * try to dirty the lockres before MIGRATING is set */
2908 spin_lock(&res->spinlock);
2909 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2910 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2911 spin_unlock(&res->spinlock);
2912 /* now wait on any pending asts and the DIRTY state */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002913 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2914 dlm_lockres_release_ast(dlm, res);
2915
2916 mlog(0, "about to wait on migration_wq, dirty=%s\n",
2917 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2918 /* if the extra ref we just put was the final one, this
2919 * will pass thru immediately. otherwise, we need to wait
2920 * for the last ast to finish. */
2921again:
2922 ret = wait_event_interruptible_timeout(dlm->migration_wq,
2923 dlm_migration_can_proceed(dlm, res, target),
2924 msecs_to_jiffies(1000));
2925 if (ret < 0) {
2926 mlog(0, "woken again: migrating? %s, dead? %s\n",
2927 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2928 test_bit(target, dlm->domain_map) ? "no":"yes");
2929 } else {
2930 mlog(0, "all is well: migrating? %s, dead? %s\n",
2931 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2932 test_bit(target, dlm->domain_map) ? "no":"yes");
2933 }
2934 if (!dlm_migration_can_proceed(dlm, res, target)) {
2935 mlog(0, "trying again...\n");
2936 goto again;
2937 }
Kurt Hackelddc09c82007-01-05 15:00:17 -08002938 /* now that we are sure the MIGRATING state is there, drop
 2939 * the unneeded state which blocked threads trying to DIRTY */
2940 spin_lock(&res->spinlock);
2941 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2942 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2943 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2944 spin_unlock(&res->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002945
2946 /* did the target go down or die? */
2947 spin_lock(&dlm->spinlock);
2948 if (!test_bit(target, dlm->domain_map)) {
2949 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2950 target);
2951 ret = -EHOSTDOWN;
2952 }
2953 spin_unlock(&dlm->spinlock);
2954
2955 /*
2956 * at this point:
2957 *
2958 * o the DLM_LOCK_RES_MIGRATING flag is set
2959 * o there are no pending asts on this lockres
2960 * o all processes trying to reserve an ast on this
2961 * lockres must wait for the MIGRATING flag to clear
2962 */
2963 return ret;
2964}
2965
2966/* last step in the migration process.
2967 * original master calls this to free all of the dlm_lock
2968 * structures that used to be for other nodes. */
2969static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2970 struct dlm_lock_resource *res)
2971{
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002972 struct list_head *queue = &res->granted;
Kurt Hackelba2bf212006-12-01 14:47:20 -08002973 int i, bit;
Christoph Hellwig800deef2007-05-17 16:03:13 +02002974 struct dlm_lock *lock, *next;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002975
2976 assert_spin_locked(&res->spinlock);
2977
2978 BUG_ON(res->owner == dlm->node_num);
2979
2980 for (i=0; i<3; i++) {
Christoph Hellwig800deef2007-05-17 16:03:13 +02002981 list_for_each_entry_safe(lock, next, queue, list) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002982 if (lock->ml.node != dlm->node_num) {
2983 mlog(0, "putting lock for node %u\n",
2984 lock->ml.node);
2985 /* be extra careful */
2986 BUG_ON(!list_empty(&lock->ast_list));
2987 BUG_ON(!list_empty(&lock->bast_list));
2988 BUG_ON(lock->ast_pending);
2989 BUG_ON(lock->bast_pending);
Kurt Hackelba2bf212006-12-01 14:47:20 -08002990 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002991 list_del_init(&lock->list);
2992 dlm_lock_put(lock);
Sunil Mushran2c5c54a2008-03-01 14:04:20 -08002993 /* In a normal unlock, we would have added a
2994 * DLM_UNLOCK_FREE_LOCK action. Force it. */
2995 dlm_lock_put(lock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08002996 }
2997 }
2998 queue++;
2999 }
Kurt Hackelba2bf212006-12-01 14:47:20 -08003000 bit = 0;
3001 while (1) {
3002 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
3003 if (bit >= O2NM_MAX_NODES)
3004 break;
3005 /* do not clear the local node reference, if there is a
3006 * process holding this, let it drop the ref itself */
3007 if (bit != dlm->node_num) {
3008 mlog(0, "%s:%.*s: node %u had a ref to this "
3009 "migrating lockres, clearing\n", dlm->name,
3010 res->lockname.len, res->lockname.name, bit);
3011 dlm_lockres_clear_refmap_bit(bit, res);
3012 }
3013 bit++;
3014 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003015}
3016
3017/* for now this is not too intelligent. we will
3018 * need stats to make this do the right thing.
3019 * this just finds the first lock on one of the
3020 * queues and uses that node as the target. */
3021static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
3022 struct dlm_lock_resource *res)
3023{
3024 int i;
3025 struct list_head *queue = &res->granted;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003026 struct dlm_lock *lock;
3027 int nodenum;
3028
3029 assert_spin_locked(&dlm->spinlock);
3030
3031 spin_lock(&res->spinlock);
3032 for (i=0; i<3; i++) {
Christoph Hellwig800deef2007-05-17 16:03:13 +02003033 list_for_each_entry(lock, queue, list) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003034 /* up to the caller to make sure this node
3035 * is alive */
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003036 if (lock->ml.node != dlm->node_num) {
3037 spin_unlock(&res->spinlock);
3038 return lock->ml.node;
3039 }
3040 }
3041 queue++;
3042 }
3043 spin_unlock(&res->spinlock);
3044 mlog(0, "have not found a suitable target yet! checking domain map\n");
3045
3046 /* ok now we're getting desperate. pick anyone alive. */
3047 nodenum = -1;
3048 while (1) {
3049 nodenum = find_next_bit(dlm->domain_map,
3050 O2NM_MAX_NODES, nodenum+1);
3051 mlog(0, "found %d in domain map\n", nodenum);
3052 if (nodenum >= O2NM_MAX_NODES)
3053 break;
3054 if (nodenum != dlm->node_num) {
3055 mlog(0, "picking %d\n", nodenum);
3056 return nodenum;
3057 }
3058 }
3059
3060 mlog(0, "giving up. no master to migrate to\n");
3061 return DLM_LOCK_RES_OWNER_UNKNOWN;
3062}
3063
3064
3065
3066/* this is called by the new master once all lockres
3067 * data has been received */
3068static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
3069 struct dlm_lock_resource *res,
3070 u8 master, u8 new_master,
3071 struct dlm_node_iter *iter)
3072{
3073 struct dlm_migrate_request migrate;
3074 int ret, status = 0;
3075 int nodenum;
3076
3077 memset(&migrate, 0, sizeof(migrate));
3078 migrate.namelen = res->lockname.len;
3079 memcpy(migrate.name, res->lockname.name, migrate.namelen);
3080 migrate.new_master = new_master;
3081 migrate.master = master;
3082
3083 ret = 0;
3084
3085 /* send message to all nodes, except the master and myself */
3086 while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3087 if (nodenum == master ||
3088 nodenum == new_master)
3089 continue;
3090
3091 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3092 &migrate, sizeof(migrate), nodenum,
3093 &status);
3094 if (ret < 0)
3095 mlog_errno(ret);
3096 else if (status < 0) {
3097 mlog(0, "migrate request (node %u) returned %d!\n",
3098 nodenum, status);
3099 ret = status;
Kurt Hackelba2bf212006-12-01 14:47:20 -08003100 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3101 /* during the migration request we short-circuited
3102 * the mastery of the lockres. make sure we have
3103 * a mastery ref for nodenum */
3104 mlog(0, "%s:%.*s: need ref for node %u\n",
3105 dlm->name, res->lockname.len, res->lockname.name,
3106 nodenum);
3107 spin_lock(&res->spinlock);
3108 dlm_lockres_set_refmap_bit(nodenum, res);
3109 spin_unlock(&res->spinlock);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003110 }
3111 }
3112
3113 if (ret < 0)
3114 mlog_errno(ret);
3115
3116 mlog(0, "returning ret=%d\n", ret);
3117 return ret;
3118}
3119
3120
3121/* if there is an existing mle for this lockres, we now know who the master is.
3122 * (the one who sent us *this* message) we can clear it up right away.
3123 * since the process that put the mle on the list still has a reference to it,
3124 * we can unhash it now, set the master and wake the process. as a result,
3125 * we will have no mle in the list to start with. now we can add an mle for
3126 * the migration and this should be the only one found for those scanning the
3127 * list. */
Kurt Hackeld74c9802007-01-17 17:04:25 -08003128int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3129 void **ret_data)
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003130{
3131 struct dlm_ctxt *dlm = data;
3132 struct dlm_lock_resource *res = NULL;
3133 struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3134 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3135 const char *name;
Mark Fasheha3d33292006-03-09 17:55:56 -08003136 unsigned int namelen, hash;
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003137 int ret = 0;
3138
3139 if (!dlm_grab(dlm))
3140 return -EINVAL;
3141
3142 name = migrate->name;
3143 namelen = migrate->namelen;
Mark Fasheha3d33292006-03-09 17:55:56 -08003144 hash = dlm_lockid_hash(name, namelen);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003145
3146 /* preallocate.. if this fails, abort */
3147 mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
Kurt Hackelad8100e2006-05-01 14:25:21 -07003148 GFP_NOFS);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003149
3150 if (!mle) {
3151 ret = -ENOMEM;
3152 goto leave;
3153 }
3154
3155 /* check for pre-existing lock */
3156 spin_lock(&dlm->spinlock);
Mark Fasheha3d33292006-03-09 17:55:56 -08003157 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
Kurt Hackel6714d8e2005-12-15 14:31:23 -08003158 spin_lock(&dlm->master_lock);
3159
3160 if (res) {
3161 spin_lock(&res->spinlock);
3162 if (res->state & DLM_LOCK_RES_RECOVERING) {
3163 /* if all is working ok, this can only mean that we got
3164 * a migrate request from a node that we now see as
3165 * dead. what can we do here? drop it to the floor? */
3166 spin_unlock(&res->spinlock);
3167 mlog(ML_ERROR, "Got a migrate request, but the "
3168 "lockres is marked as recovering!");
			kmem_cache_free(dlm_mle_cache, mle);
			ret = -EINVAL; /* need a better solution */
			goto unlock;
		}
		res->state |= DLM_LOCK_RES_MIGRATING;
		spin_unlock(&res->spinlock);
	}

	/* ignore status.  only nonzero status would BUG. */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
				    name, namelen,
				    migrate->new_master,
				    migrate->master);

unlock:
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (oldmle) {
		/* master is known, detach if not already detached */
		dlm_mle_detach_hb_events(dlm, oldmle);
		dlm_put_mle(oldmle);
	}

	if (res)
		dlm_lockres_put(res);
leave:
	dlm_put(dlm);
	return ret;
}

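/*
 * For context: this handler is wired up to DLM_MIGRATE_REQUEST_MSG during
 * domain setup in dlmdomain.c.  Sketch from memory (the exact argument
 * list may differ between kernel versions):
 *
 *	status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
 *					sizeof(struct dlm_migrate_request),
 *					dlm_migrate_request_handler,
 *					dlm, NULL, &dlm->dlm_domain_handlers);
 */
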
/* caller must be holding dlm->spinlock and dlm->master_lock.
 * when adding a migration mle, we can clear any other mles
 * in the master list because we know with certainty that
 * the master is "master".  so we remove any old mle from
 * the list after setting its master field, and then add
 * the new migration mle.  this way we keep to the rule
 * of having only one mle for a given lock name at all times. */
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res,
				 struct dlm_master_list_entry *mle,
				 struct dlm_master_list_entry **oldmle,
				 const char *name, unsigned int namelen,
				 u8 new_master, u8 master)
{
	int found;
	int ret = 0;

	*oldmle = NULL;

	mlog_entry_void();

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&dlm->master_lock);

	/* caller is responsible for any ref taken here on oldmle */
	found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
	if (found) {
		struct dlm_master_list_entry *tmp = *oldmle;
		spin_lock(&tmp->spinlock);
		if (tmp->type == DLM_MLE_MIGRATION) {
			if (master == dlm->node_num) {
				/* ah, another process raced me to it */
				mlog(0, "tried to migrate %.*s, but some "
				     "process beat me to it\n",
				     namelen, name);
				ret = -EEXIST;
			} else {
				/* bad.  2 NODES are trying to migrate! */
				mlog(ML_ERROR, "migration error  mle: "
				     "master=%u new_master=%u // request: "
				     "master=%u new_master=%u // "
				     "lockres=%.*s\n",
				     tmp->master, tmp->new_master,
				     master, new_master,
				     namelen, name);
				BUG();
			}
		} else {
			/* this is essentially what assert_master does */
			tmp->master = master;
			atomic_set(&tmp->woken, 1);
			wake_up(&tmp->wq);
			/* remove it from the list so that only one
			 * mle will be found */
			list_del_init(&tmp->list);
			/* the old mle is off the list; detach it from
			 * heartbeat events before it gets freed */
			__dlm_mle_detach_hb_events(dlm, tmp);
			ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
			mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
			     "telling master to get ref for cleared out mle "
			     "during migration\n", dlm->name, namelen, name,
			     master, new_master);
		}
		spin_unlock(&tmp->spinlock);
	}

	/* now add a migration mle to the tail of the list */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	/* the new master will be sending an assert master for this.
	 * at that point we will get the refmap reference */
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	list_add(&mle->list, &dlm->master_list);

	return ret;
}
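
/*
 * Return values of dlm_add_migration_mle(), as relied upon by its callers:
 *
 *	0	- a migration mle was added; nothing was displaced
 *	DLM_MIGRATE_RESPONSE_MASTERY_REF
 *		- a non-migration mle was cleared out, so the requesting
 *		  master must take a mastery (refmap) reference
 *	-EEXIST	- another local process already raced us into migrating
 *		  this lockres
 */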

void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_master_list_entry *mle, *next;
	struct dlm_lock_resource *res;
	unsigned int hash;

	mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
top:
	assert_spin_locked(&dlm->spinlock);

	/* clean the master list */
	spin_lock(&dlm->master_lock);
	list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
		BUG_ON(mle->type != DLM_MLE_BLOCK &&
		       mle->type != DLM_MLE_MASTER &&
		       mle->type != DLM_MLE_MIGRATION);

		/* MASTER mles are initiated locally.  the waiting
		 * process will notice the node map change
		 * shortly.  let that happen as normal. */
		if (mle->type == DLM_MLE_MASTER)
			continue;

		/* BLOCK mles are initiated by other nodes.
		 * need to clean up if the dead node would have
		 * been the master. */
		if (mle->type == DLM_MLE_BLOCK) {
			int bit;

			spin_lock(&mle->spinlock);
			bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
			if (bit != dead_node) {
				mlog(0, "mle found, but dead node %u would "
				     "not have been master\n", dead_node);
				spin_unlock(&mle->spinlock);
			} else {
				/* must drop the refcount by one since the
				 * assert_master will never arrive.  this
				 * may result in the mle being unlinked and
				 * freed, but there may still be a process
				 * waiting in the dlmlock path which is fine. */
				mlog(0, "node %u was expected master\n",
				     dead_node);
				atomic_set(&mle->woken, 1);
				spin_unlock(&mle->spinlock);
				wake_up(&mle->wq);
				/* do not need events any longer, so detach
				 * from heartbeat */
				__dlm_mle_detach_hb_events(dlm, mle);
				__dlm_put_mle(mle);
			}
			continue;
		}

		/* everything else is a MIGRATION mle */

		/* the rule for MIGRATION mles is that the master
		 * becomes UNKNOWN if *either* the original or
		 * the new master dies.  all UNKNOWN lockreses
		 * are sent to whichever node becomes the recovery
		 * master.  the new master is responsible for
		 * determining if there is still a master for
		 * this lockres, or if he needs to take over
		 * mastery.  either way, this node should expect
		 * another message to resolve this. */
		if (mle->master != dead_node &&
		    mle->new_master != dead_node)
			continue;

		/* if we have reached this point, this mle needs to
		 * be removed from the list and freed. */

		/* remove from the list early.  NOTE: unlinking
		 * list_head while in list_for_each_entry_safe */
		__dlm_mle_detach_hb_events(dlm, mle);
		spin_lock(&mle->spinlock);
		list_del_init(&mle->list);
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);
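		/* any local waiter on this mle (e.g. a process blocked in
		 * dlm_migrate_lockres()) sees woken set and backs out of
		 * the migration */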

		mlog(0, "%s: node %u died during migration from "
		     "%u to %u!\n", dlm->name, dead_node,
		     mle->master, mle->new_master);
		/* if there is a lockres associated with this
		 * mle, find it and set its owner to UNKNOWN */
		hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
		res = __dlm_lookup_lockres(dlm, mle->u.name.name,
					   mle->u.name.len, hash);
		if (res) {
			/* unfortunately if we hit this rare case, our
			 * lock ordering is messed.  we need to drop
			 * the master lock so that we can take the
			 * lockres lock, meaning that we will have to
			 * restart from the head of list. */
			spin_unlock(&dlm->master_lock);

			/* move lockres onto recovery list */
			spin_lock(&res->spinlock);
			dlm_set_lockres_owner(dlm, res,
					      DLM_LOCK_RES_OWNER_UNKNOWN);
			dlm_move_lockres_to_recovery_list(dlm, res);
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);

			/* about to get rid of mle, detach from heartbeat */
			__dlm_mle_detach_hb_events(dlm, mle);

			/* dump the mle */
			spin_lock(&dlm->master_lock);
			__dlm_put_mle(mle);
			spin_unlock(&dlm->master_lock);

			/* restart */
			goto top;
		}

		/* this may be the last reference */
		__dlm_put_mle(mle);
	}
	spin_unlock(&dlm->master_lock);
}


int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 u8 old_master)
{
	struct dlm_node_iter iter;
	int ret = 0;

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	clear_bit(old_master, iter.node_map);
	clear_bit(dlm->node_num, iter.node_map);
	spin_unlock(&dlm->spinlock);

	/* ownership of the lockres is changing.  account for the
	 * mastery reference here since old_master will briefly have
	 * a reference after the migration completes */
	spin_lock(&res->spinlock);
	dlm_lockres_set_refmap_bit(old_master, res);
	spin_unlock(&res->spinlock);

	mlog(0, "now time to do a migrate request to other nodes\n");
	ret = dlm_do_migrate_request(dlm, res, old_master,
				     dlm->node_num, &iter);
	if (ret < 0) {
		mlog_errno(ret);
		goto leave;
	}

	mlog(0, "doing assert master of %.*s to all except the original node\n",
	     res->lockname.len, res->lockname.name);
	/* this call now finishes out the nodemap
	 * even if one or more nodes die */
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		/* no longer need to retry.  all living nodes contacted. */
		mlog_errno(ret);
		ret = 0;
	}

	memset(iter.node_map, 0, sizeof(iter.node_map));
	set_bit(old_master, iter.node_map);
	mlog(0, "doing assert master of %.*s back to %u\n",
	     res->lockname.len, res->lockname.name, old_master);
	ret = dlm_do_assert_master(dlm, res, iter.node_map,
				   DLM_ASSERT_MASTER_FINISH_MIGRATION);
	if (ret < 0) {
		mlog(0, "assert master to original master failed "
		     "with %d.\n", ret);
		/* the only nonzero status here would be because of
		 * a dead original node.  we're done. */
		ret = 0;
	}

	/* all done, set the owner, clear the flag */
	spin_lock(&res->spinlock);
	dlm_set_lockres_owner(dlm, res, dlm->node_num);
	res->state &= ~DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	/* re-dirty it on the new master */
	dlm_kick_thread(dlm, res);
	wake_up(&res->wq);
leave:
	return ret;
}

/*
 * LOCKRES AST REFCOUNT
 * this is integral to migration
 */

/* for future intent to call an ast, reserve one ahead of time.
 * this should be called only after waiting on the lockres
 * with dlm_wait_on_lockres, and while still holding the
 * spinlock after the call. */
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		__dlm_print_one_lock_resource(res);
	}
	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);

	atomic_inc(&res->asts_reserved);
}
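
/*
 * Sketch of the reserve/release pairing (illustrative only; the real
 * callers live in dlmthread.c and friends):
 *
 *	spin_lock(&res->spinlock);
 *	...decide that an ast or bast will need to be fired...
 *	__dlm_lockres_reserve_ast(res);
 *	spin_unlock(&res->spinlock);
 *	...
 *	dlm_lockres_release_ast(dlm, res);
 *
 * where the release happens once the ast/bast has fired or is known to
 * be unneeded.
 */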

/*
 * used to drop the reserved ast, either because it went unused,
 * or because the ast/bast was actually called.
 *
 * also, if there is a pending migration on this lockres,
 * and this was the last pending ast on the lockres,
 * atomically set the MIGRATING flag before we drop the lock.
 * this is how we ensure that migration can proceed with no
 * asts in progress.  note that it is ok if the state of the
 * queues is such that a lock should be granted in the future
 * or that a bast should be fired, because the new master will
 * shuffle the lists on this lockres as soon as it is migrated.
 */
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
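	/* atomic_dec_and_lock() only takes res->spinlock when the count
	 * drops to zero, so the MIGRATING transition below is performed
	 * exactly once, by whoever releases the last reserved ast */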
	if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
		return;

	if (!res->migration_pending) {
		spin_unlock(&res->spinlock);
		return;
	}

	BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
	res->migration_pending = 0;
	res->state |= DLM_LOCK_RES_MIGRATING;
	spin_unlock(&res->spinlock);
	wake_up(&res->wq);
	wake_up(&dlm->migration_wq);
}