/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2009 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "recover.h"
#include "rcom.h"
#include "config.h"
#include "lowcomms.h"

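/* a message carries slot information only if the sender's minor header
   version (the low 16 bits of h_version) is at least DLM_HEADER_SLOTS */
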
int dlm_slots_version(struct dlm_header *h)
{
	if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
		return 0;
	return 1;
}

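/* save the slot and slot generation that a node reports for itself in its
   status reply (rcom_config) into that node's member struct */
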
void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
		   struct dlm_member *memb)
{
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;

	if (!dlm_slots_version(&rc->rc_header))
		return;

	memb->slot = le16_to_cpu(rf->rf_our_slot);
	memb->generation = le32_to_cpu(rf->rf_generation);
}

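/* pack the in-use entries of ls_slots into rcom_slot entries (placed after
   the rcom_config) in the outgoing recovery message */
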
void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_slot *slot;
	struct rcom_slot *ro;
	int i;

	ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	/* ls_slots array is sparse, but not rcom_slots */

	for (i = 0; i < ls->ls_slots_size; i++) {
		slot = &ls->ls_slots[i];
		if (!slot->nodeid)
			continue;
		ro->ro_nodeid = cpu_to_le32(slot->nodeid);
		ro->ro_slot = cpu_to_le16(slot->slot);
		ro++;
	}
}

#define SLOT_DEBUG_LINE 128

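/* log one "slot:nodeid" pair per member, taken from either a packed
   rcom_slot array (ro0) or a sparse dlm_slot array; only emitted when debug
   logging is enabled */
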
static void log_debug_slots(struct dlm_ls *ls, uint32_t gen, int num_slots,
			    struct rcom_slot *ro0, struct dlm_slot *array,
			    int array_size)
{
	char line[SLOT_DEBUG_LINE];
	int len = SLOT_DEBUG_LINE - 1;
	int pos = 0;
	int ret, i;

	if (!dlm_config.ci_log_debug)
		return;

	memset(line, 0, sizeof(line));

	if (array) {
		for (i = 0; i < array_size; i++) {
			if (!array[i].nodeid)
				continue;

			ret = snprintf(line + pos, len - pos, " %d:%d",
				       array[i].slot, array[i].nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	} else if (ro0) {
		for (i = 0; i < num_slots; i++) {
			ret = snprintf(line + pos, len - pos, " %d:%d",
				       ro0[i].ro_slot, ro0[i].ro_nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	}

	log_debug(ls, "generation %u slots %d%s", gen, num_slots, line);
}

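/* unpack the slot table received in ls_recover_buf: record the new slot
   generation, convert the rcom_slot entries to cpu byte order, and save each
   member's slot; returns -1 if the message carries no slot info or the
   table is inconsistent */
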
int dlm_slots_copy_in(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
	struct rcom_slot *ro0, *ro;
	int our_nodeid = dlm_our_nodeid();
	int i, num_slots;
	uint32_t gen;

	if (!dlm_slots_version(&rc->rc_header))
		return -1;

	gen = le32_to_cpu(rf->rf_generation);
	if (gen <= ls->ls_generation) {
		log_error(ls, "dlm_slots_copy_in gen %u old %u",
			  gen, ls->ls_generation);
	}
	ls->ls_generation = gen;

	num_slots = le16_to_cpu(rf->rf_num_slots);
	if (!num_slots)
		return -1;

	ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
		ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
		ro->ro_slot = le16_to_cpu(ro->ro_slot);
	}

	log_debug_slots(ls, gen, num_slots, ro0, NULL, 0);

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
			if (ro->ro_nodeid != memb->nodeid)
				continue;
			memb->slot = ro->ro_slot;
			memb->slot_prev = memb->slot;
			break;
		}

		if (memb->nodeid == our_nodeid) {
			if (ls->ls_slot && ls->ls_slot != memb->slot) {
				log_error(ls, "dlm_slots_copy_in our slot "
					  "changed %d %d", ls->ls_slot,
					  memb->slot);
				return -1;
			}

			if (!ls->ls_slot)
				ls->ls_slot = memb->slot;
		}

		if (!memb->slot) {
			log_error(ls, "dlm_slots_copy_in nodeid %d no slot",
				  memb->nodeid);
			return -1;
		}
	}

	return 0;
}

/* for any nodes that do not support slots, we will not have set memb->slot
   in wait_status_all(), so memb->slot will remain -1, and we will not
   assign slots or set ls_num_slots here */

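/* called during recovery to build the slot table: keep the slot each member
   already has, assign free slots to members that need one, bump the slot
   generation, and return the newly allocated dlm_slot array and generation
   to the caller */
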
int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
		     struct dlm_slot **slots_out, uint32_t *gen_out)
{
	struct dlm_member *memb;
	struct dlm_slot *array;
	int our_nodeid = dlm_our_nodeid();
	int array_size, max_slots, i;
	int need = 0;
	int max = 0;
	int num = 0;
	uint32_t gen = 0;

	/* our own memb struct will have slot -1 gen 0 */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->nodeid == our_nodeid) {
			memb->slot = ls->ls_slot;
			memb->generation = ls->ls_generation;
			break;
		}
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->generation > gen)
			gen = memb->generation;

		/* node doesn't support slots */

		if (memb->slot == -1)
			return -1;

		/* node needs a slot assigned */

		if (!memb->slot)
			need++;

		/* node has a slot assigned */

		num++;

		if (!max || max < memb->slot)
			max = memb->slot;

		/* sanity check, once slot is assigned it shouldn't change */

		if (memb->slot_prev && memb->slot &&
		    memb->slot_prev != memb->slot) {
			log_error(ls, "nodeid %d slot changed %d %d",
				  memb->nodeid, memb->slot_prev, memb->slot);
			return -1;
		}
		memb->slot_prev = memb->slot;
	}

	array_size = max + need;

	array = kzalloc(array_size * sizeof(struct dlm_slot), GFP_NOFS);
	if (!array)
		return -ENOMEM;

	num = 0;

	/* fill in slots (offsets) that are used */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!memb->slot)
			continue;

		if (memb->slot > array_size) {
			log_error(ls, "invalid slot number %d", memb->slot);
			kfree(array);
			return -1;
		}

		array[memb->slot - 1].nodeid = memb->nodeid;
		array[memb->slot - 1].slot = memb->slot;
		num++;
	}

	/* assign new slots from unused offsets */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->slot)
			continue;

		for (i = 0; i < array_size; i++) {
			if (array[i].nodeid)
				continue;

			memb->slot = i + 1;
			memb->slot_prev = memb->slot;
			array[i].nodeid = memb->nodeid;
			array[i].slot = memb->slot;
			num++;

			if (!ls->ls_slot && memb->nodeid == our_nodeid)
				ls->ls_slot = memb->slot;
			break;
		}

		if (!memb->slot) {
			log_error(ls, "no free slot found");
			kfree(array);
			return -1;
		}
	}

	gen++;

	log_debug_slots(ls, gen, num, NULL, array, array_size);

	max_slots = (dlm_config.ci_buffer_size - sizeof(struct dlm_rcom) -
		     sizeof(struct rcom_config)) / sizeof(struct rcom_slot);

	if (num > max_slots) {
		log_error(ls, "num_slots %d exceeds max_slots %d",
			  num, max_slots);
		kfree(array);
		return -1;
	}

	*gen_out = gen;
	*slots_out = array;
	*slots_size = array_size;
	*num_slots = num;
	return 0;
}

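/* insert the new member into ls_nodes, keeping the list sorted by nodeid */
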
static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
	struct dlm_member *memb = NULL;
	struct list_head *tmp;
	struct list_head *newlist = &new->list;
	struct list_head *head = &ls->ls_nodes;

	list_for_each(tmp, head) {
		memb = list_entry(tmp, struct dlm_member, list);
		if (new->nodeid < memb->nodeid)
			break;
	}

	if (!memb)
		list_add_tail(newlist, head);
	else {
		/* insert newlist just before tmp to preserve the order */
		list_add_tail(newlist, tmp);
	}
}

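/* allocate a member struct for nodeid, look up its weight, make sure a
   lowcomms connection to it exists, and add it to ls_nodes in nodeid
   order */
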
static int dlm_add_member(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;
	int w, error;

	memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
	if (!memb)
		return -ENOMEM;

	w = dlm_node_weight(ls->ls_name, nodeid);
	if (w < 0) {
		kfree(memb);
		return w;
	}

	error = dlm_lowcomms_connect_node(nodeid);
	if (error < 0) {
		kfree(memb);
		return error;
	}

	memb->nodeid = nodeid;
	memb->weight = w;
	add_ordered_member(ls, memb);
	ls->ls_num_nodes++;
	return 0;
}

static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
{
	list_move(&memb->list, &ls->ls_nodes_gone);
	ls->ls_num_nodes--;
}

int dlm_is_member(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->nodeid == nodeid)
			return 1;
	}
	return 0;
}

int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		if (memb->nodeid == nodeid)
			return 1;
	}
	return 0;
}

static void clear_memb_list(struct list_head *head)
{
	struct dlm_member *memb;

	while (!list_empty(head)) {
		memb = list_entry(head->next, struct dlm_member, list);
		list_del(&memb->list);
		kfree(memb);
	}
}

void dlm_clear_members(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes);
	ls->ls_num_nodes = 0;
}

void dlm_clear_members_gone(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes_gone);
}

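/* rebuild ls_node_array: a flat array of member nodeids in which each nodeid
   appears "weight" times (or once each if every member has weight 0), with
   ls_total_weight set to the array length */
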
static void make_member_array(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int i, w, x = 0, total = 0, all_zero = 0, *array;

	kfree(ls->ls_node_array);
	ls->ls_node_array = NULL;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->weight)
			total += memb->weight;
	}

	/* all nodes revert to weight of 1 if all have weight 0 */

	if (!total) {
		total = ls->ls_num_nodes;
		all_zero = 1;
	}

	ls->ls_total_weight = total;

	array = kmalloc(sizeof(int) * total, GFP_NOFS);
	if (!array)
		return;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!all_zero && !memb->weight)
			continue;

		if (all_zero)
			w = 1;
		else
			w = memb->weight;

		DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););

		for (i = 0; i < w; i++)
			array[x++] = memb->nodeid;
	}

	ls->ls_node_array = array;
}

/* send a status request to all members just to establish comms connections */

static int ping_members(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int error = 0;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		error = dlm_recovery_stopped(ls);
		if (error)
			break;
		error = dlm_rcom_status(ls, memb->nodeid, 0);
		if (error)
			break;
	}
	if (error)
		log_debug(ls, "ping_members aborted %d last nodeid %d",
			  error, ls->ls_recover_nodeid);
	return error;
}

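/* update the member lists for a new configuration: move departed nodes to
   ls_nodes_gone (counted in *neg_out so the "neg" recovery steps run), add
   new nodes, recompute ls_low_nodeid and the weighted node array, then ping
   all members */
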
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
	struct dlm_member *memb, *safe;
	int i, error, found, pos = 0, neg = 0, low = -1;

	/* previously removed members that we've not finished removing need to
	   count as a negative change so the "neg" recovery steps will happen */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		log_debug(ls, "prev removed member %d", memb->nodeid);
		neg++;
	}

	/* move departed members from ls_nodes to ls_nodes_gone */

	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
		found = 0;
		for (i = 0; i < rv->node_count; i++) {
			if (memb->nodeid == rv->nodeids[i]) {
				found = 1;
				break;
			}
		}

		if (!found) {
			neg++;
			dlm_remove_member(ls, memb);
			log_debug(ls, "remove member %d", memb->nodeid);
		}
	}

	/* Add an entry to ls_nodes_gone for members that were removed and
	   then added again, so that previous state for these nodes will be
	   cleared during recovery. */

	for (i = 0; i < rv->new_count; i++) {
		if (!dlm_is_member(ls, rv->new[i]))
			continue;
		log_debug(ls, "new nodeid %d is a re-added member", rv->new[i]);

		memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS);
		if (!memb)
			return -ENOMEM;
		memb->nodeid = rv->new[i];
		list_add_tail(&memb->list, &ls->ls_nodes_gone);
		neg++;
	}

	/* add new members to ls_nodes */

	for (i = 0; i < rv->node_count; i++) {
		if (dlm_is_member(ls, rv->nodeids[i]))
			continue;
		dlm_add_member(ls, rv->nodeids[i]);
		pos++;
		log_debug(ls, "add member %d", rv->nodeids[i]);
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (low == -1 || memb->nodeid < low)
			low = memb->nodeid;
	}
	ls->ls_low_nodeid = low;

	make_member_array(ls);
	*neg_out = neg;

	error = ping_members(ls);
	if (!error || error == -EPROTO) {
		/* new_lockspace() may be waiting to know if the config
		   is good or bad */
		ls->ls_members_result = error;
		complete(&ls->ls_members_done);
	}

	log_debug(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
	return error;
}

/* Userspace guarantees that dlm_ls_stop() has completed on all nodes before
   dlm_ls_start() is called on any of them to start the new recovery. */

int dlm_ls_stop(struct dlm_ls *ls)
{
	int new;

	/*
	 * Prevent dlm_recv from being in the middle of something when we do
	 * the stop.  This includes ensuring dlm_recv isn't processing a
	 * recovery message (rcom), while dlm_recoverd is aborting and
	 * resetting things from an in-progress recovery.  i.e. we want
	 * dlm_recoverd to abort its recovery without worrying about dlm_recv
	 * processing an rcom at the same time.  Stopping dlm_recv also makes
	 * it easy for dlm_receive_message() to check locking stopped and add a
	 * message to the requestqueue without races.
	 */

	down_write(&ls->ls_recv_active);

	/*
	 * Abort any recovery that's in progress (see RECOVERY_STOP,
	 * dlm_recovery_stopped()) and tell any other threads running in the
	 * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
	 */

	spin_lock(&ls->ls_recover_lock);
	set_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
	ls->ls_recover_seq++;
	spin_unlock(&ls->ls_recover_lock);

	/*
	 * Let dlm_recv run again, now any normal messages will be saved on the
	 * requestqueue for later.
	 */

	up_write(&ls->ls_recv_active);

	/*
	 * This in_recovery lock does two things:
	 * 1) Keeps this function from returning until all threads are out
	 *    of locking routines and locking is truly stopped.
	 * 2) Keeps any new requests from being processed until it's unlocked
	 *    when recovery is complete.
	 */

	if (new)
		down_write(&ls->ls_in_recovery);

	/*
	 * The recoverd suspend/resume makes sure that dlm_recoverd (if
	 * running) has noticed RECOVERY_STOP above and quit processing the
	 * previous recovery.
	 */

	dlm_recoverd_suspend(ls);

	spin_lock(&ls->ls_recover_lock);
	kfree(ls->ls_slots);
	ls->ls_slots = NULL;
	ls->ls_num_slots = 0;
	ls->ls_slots_size = 0;
	ls->ls_recover_status = 0;
	spin_unlock(&ls->ls_recover_lock);

	dlm_recoverd_resume(ls);

	if (!ls->ls_recover_begin)
		ls->ls_recover_begin = jiffies;
	return 0;
}

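/* build a dlm_recover struct from the current node configuration and hand it
   to dlm_recoverd to run the recovery; the lockspace must already have been
   stopped by dlm_ls_stop() */
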
int dlm_ls_start(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL, *rv_old;
	int *ids = NULL, *new = NULL;
	int error, ids_count = 0, new_count = 0;

	rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	error = dlm_nodeid_list(ls->ls_name, &ids, &ids_count,
				&new, &new_count);
	if (error < 0)
		goto fail;

	spin_lock(&ls->ls_recover_lock);

	/* the lockspace needs to be stopped before it can be started */

	if (!dlm_locking_stopped(ls)) {
		spin_unlock(&ls->ls_recover_lock);
		log_error(ls, "start ignored: lockspace running");
		error = -EINVAL;
		goto fail;
	}

	rv->nodeids = ids;
	rv->node_count = ids_count;
	rv->new = new;
	rv->new_count = new_count;
	rv->seq = ++ls->ls_recover_seq;
	rv_old = ls->ls_recover_args;
	ls->ls_recover_args = rv;
	spin_unlock(&ls->ls_recover_lock);

	if (rv_old) {
		log_error(ls, "unused recovery %llx %d",
			  (unsigned long long)rv_old->seq, rv_old->node_count);
		kfree(rv_old->nodeids);
		kfree(rv_old->new);
		kfree(rv_old);
	}

	dlm_recoverd_kick(ls);
	return 0;

 fail:
	kfree(rv);
	kfree(ids);
	kfree(new);
	return error;
}