/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
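
/* Worked example of the stages above (illustrative only, not a literal
   code path): a caller requesting a new EX lock on a resource whose
   master is a remote node would traverse:

     dlm_lock()               stage 1: args checked, split to request_lock
     request_lock()           stage 2: rsb found and locked
     _request_lock()          stage 3: master is remote -> send_request()
        ... on the master: receive_request() -> do_request()   (stage 4)
     receive_request_reply()  <- send_request_reply() from the master
*/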
#include <linux/types.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
	{1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
	{1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
	{1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
	{1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
	{1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
	{1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
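
/* Example of reading the matrix (indices are mode+1 because DLM_LOCK_IV
   is -1): a granted PR lock is compatible with a requested CR lock,
   __dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_CR + 1] == 1, but not
   with a requested CW lock, where the entry is 0.  The modes_compat()
   macro below evaluates exactly this for a pair of lkb's. */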

/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
	/* UN   NL  CR  CW  PR  PW  EX  PD*/
	{  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
	{  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
	{  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
	{  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
	{  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
	{  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
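
/* Example of the LVB transfer direction: converting up from PR to EX
   gives dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 1, so
   the resource's LVB is copied out to the caller's buffer; converting
   down from EX to NL gives [DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0, so
   the caller's LVB is written back to the resource.  set_lvb_lock()
   below acts on these values. */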

#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
	{0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
	{0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
	{0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
	{0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
	{0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
	{0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
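
/* Example: __quecvt_compat_matrix permits QUECVT only on upward
   conversions, so a PR->EX convert with DLM_LKF_QUECVT is accepted
   ([DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 1) while PR->CR is rejected
   (entry 0); validate_lock_args() below returns -EINVAL in the latter
   case. */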

void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
	       "     status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
}

void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
	       r->res_nodeid, r->res_flags, r->res_first_lkid,
	       r->res_recover_locks_count, r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

static inline void unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

static inline int lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	if (lkb->lkb_flags & DLM_IFL_MSTCPY)
		DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode == DLM_LOCK_PR && lkb->lkb_rqmode == DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode == DLM_LOCK_PR && lkb->lkb_grmode == DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	lkb->lkb_lksb->sb_status = rv;
	lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;

	dlm_add_ast(lkb, AST_COMP);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb))
		send_bast(r, lkb, rqmode);
	else {
		lkb->lkb_bastmode = rqmode;
		dlm_add_ast(lkb, AST_BAST);
	}
}
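
/* Sketch of how these reach the application: when do_request() grants a
   local lock it calls queue_cast(r, lkb, 0), which stores status 0 in
   the caller's lksb and queues the completion AST; a later conflicting
   request triggers queue_bast(r, lkb, rqmode), queueing a blocking AST
   carrying the conflicting mode.  For a master copy lkb (the lock lives
   on another node) queue_bast() turns into a send_bast() message, and
   grants are propagated with send_grant() instead of a local cast. */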

/*
 * Basic operations on rsb's and lkb's
 */

static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r;

	r = allocate_rsb(ls, len);
	if (!r)
		return NULL;

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	return r;
}

static int search_rsb_list(struct list_head *head, char *name, int len,
			   unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error = 0;

	list_for_each_entry(r, head, res_hashchain) {
		if (len == r->res_length && !memcmp(name, r->res_name, len))
			goto found;
	}
	return -EBADR;

 found:
	if (r->res_nodeid && (flags & R_MASTER))
		error = -ENOTBLK;
	*r_ret = r;
	return error;
}

static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		       unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error;

	error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
	if (!error) {
		kref_get(&r->res_ref);
		goto out;
	}
	error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
	if (error)
		goto out;

	list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);

	if (dlm_no_directory(ls))
		goto out;

	if (r->res_nodeid == -1) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else if (r->res_nodeid > 0) {
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else {
		DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
		DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
	}
 out:
	*r_ret = r;
	return error;
}

static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		      unsigned int flags, struct dlm_rsb **r_ret)
{
	int error;
	write_lock(&ls->ls_rsbtbl[b].lock);
	error = _search_rsb(ls, name, len, b, flags, r_ret);
	write_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 */

static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r, *tmp;
	uint32_t hash, bucket;
	int error = 0;

	if (dlm_no_directory(ls))
		flags |= R_CREATE;

	hash = jhash(name, namelen, 0);
	bucket = hash & (ls->ls_rsbtbl_size - 1);

	error = search_rsb(ls, name, namelen, bucket, flags, &r);
	if (!error)
		goto out;

	if (error == -EBADR && !(flags & R_CREATE))
		goto out;

	/* the rsb was found but wasn't a master copy */
	if (error == -ENOTBLK)
		goto out;

	error = -ENOMEM;
	r = create_rsb(ls, name, namelen);
	if (!r)
		goto out;

	r->res_hash = hash;
	r->res_bucket = bucket;
	r->res_nodeid = -1;
	kref_init(&r->res_ref);

	/* With no directory, the master can be set immediately */
	if (dlm_no_directory(ls)) {
		int nodeid = dlm_dir_nodeid(r);
		if (nodeid == dlm_our_nodeid())
			nodeid = 0;
		r->res_nodeid = nodeid;
	}

	write_lock(&ls->ls_rsbtbl[bucket].lock);
	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
	if (!error) {
		write_unlock(&ls->ls_rsbtbl[bucket].lock);
		free_rsb(r);
		r = tmp;
		goto out;
	}
	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
	error = 0;
 out:
	*r_ret = r;
	return error;
}

int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
		 unsigned int flags, struct dlm_rsb **r_ret)
{
	return find_rsb(ls, name, namelen, flags, r_ret);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	if (r->res_lvbptr) {
		free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	write_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb, *tmp;
	uint32_t lkid = 0;
	uint16_t bucket;

	lkb = allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);

	get_random_bytes(&bucket, sizeof(bucket));
	bucket &= (ls->ls_lkbtbl_size - 1);

	write_lock(&ls->ls_lkbtbl[bucket].lock);

	/* counter can roll over so we must verify lkid is not in use */

	while (lkid == 0) {
		lkid = bucket | (ls->ls_lkbtbl[bucket].counter++ << 16);

		list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
				    lkb_idtbl_list) {
			if (tmp->lkb_id != lkid)
				continue;
			lkid = 0;
			break;
		}
	}

	lkb->lkb_id = lkid;
	list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
	write_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return 0;
}
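
/* Example of the lkid encoding used above: with bucket 5 and a counter
   value of 7, lkid = 0x5 | (0x7 << 16) = 0x00070005.  The low 16 bits
   identify the hash bucket, so __find_lkb() below can recover it with
   "lkid & 0xFFFF" and search only that bucket's list. */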

static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
{
	uint16_t bucket = lkid & 0xFFFF;
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
		if (lkb->lkb_id == lkid)
			return lkb;
	}
	return NULL;
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	uint16_t bucket = lkid & 0xFFFF;

	if (bucket >= ls->ls_lkbtbl_size)
		return -EBADSLT;

	read_lock(&ls->ls_lkbtbl[bucket].lock);
	lkb = __find_lkb(ls, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	read_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint16_t bucket = lkb->lkb_id & 0xFFFF;

	write_lock(&ls->ls_lkbtbl[bucket].lock);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		list_del(&lkb->lkb_idtbl_list);
		write_unlock(&ls->ls_lkbtbl[bucket].lock);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			free_lvb(lkb->lkb_lvbptr);
		free_lkb(lkb);
		return 1;
	} else {
		write_unlock(&ls->ls_lkbtbl[bucket].lock);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	if (!lkb)
		list_add_tail(new, head);
	else
		__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
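
/* Illustration of the ordering lkb_add_ordered() aims to keep (per the
   "order of grmode" comment in add_lkb() below): with entries at modes
   EX, PR, NL, a new CW entry is linked in front of the first entry with
   a lower mode, giving EX, PR, CW, NL, so the most restrictive modes
   sit at the head of the queue. */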

/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "add overlap %x cur %d new %d count %d flags %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "add_to_waiters %x error %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remove_from_waiters lkid %x flags %x types %d %d",
		  lkb->lkb_id, lkb->lkb_flags, mstype, lkb->lkb_wait_type);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't received a
	   reply to the op that was in progress prior to the unlock/cancel;
	   we give up on any reply to the earlier op.  FIXME: not sure
	   when/how this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remove_from_waiters %x reply %d give up on %d",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms != &ls->ls_stub_ms)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type);
	if (ms != &ls->ls_stub_ms)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

static void dir_remove(struct dlm_rsb *r)
{
	int to_nodeid;

	if (dlm_no_directory(r->res_ls))
		return;

	to_nodeid = dlm_dir_nodeid(r);
	if (to_nodeid != dlm_our_nodeid())
		send_remove(r);
	else
		dlm_dir_remove_entry(r->res_ls, to_nodeid,
				     r->res_name, r->res_length);
}

/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
   found since they are in order of newest to oldest? */

static int shrink_bucket(struct dlm_ls *ls, int b)
{
	struct dlm_rsb *r;
	int count = 0, found;

	for (;;) {
		found = 0;
		write_lock(&ls->ls_rsbtbl[b].lock);
		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
					    res_hashchain) {
			if (!time_after_eq(jiffies, r->res_toss_time +
					   dlm_config.ci_toss_secs * HZ))
				continue;
			found = 1;
			break;
		}

		if (!found) {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			break;
		}

		if (kref_put(&r->res_ref, kill_rsb)) {
			list_del(&r->res_hashchain);
			write_unlock(&ls->ls_rsbtbl[b].lock);

			if (is_master(r))
				dir_remove(r);
			free_rsb(r);
			count++;
		} else {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "tossed rsb in use %s", r->res_name);
		}
	}

	return count;
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	if (dlm_locking_stopped(ls))
		return;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		cond_resched();
	}
}

/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}

static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}

/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */

static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}

/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}

static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
	lkb->lkb_highbast = 0;
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}

static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;

	return 0;
}

/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}

/*
 * "A conversion deadlock arises with a pair of lock requests in the converting
 * queue for one resource.  The granted mode of each lock blocks the requested
 * mode of the other lock."
 *
 * Part 2: if the granted mode of lkb is preventing the first lkb in the
 * convert queue from being granted, then demote lkb (set grmode to NL).
 * This second form requires that we check for conv-deadlk even when
 * now == 0 in _can_be_granted().
 *
 * Example:
 * Granted Queue: empty
 * Convert Queue: NL->EX (first lock)
 *                PR->EX (second lock)
 *
 * The first lock can't be granted because of the granted mode of the second
 * lock and the second lock can't be granted because it's not first in the
 * list.  We demote the granted mode of the second lock (the lkb passed to
 * this function).
 *
 * After the resolution, the "grant pending" function needs to go back and
 * try to grant locks on the convert queue again since the first lock can
 * now be granted.
 */

static int conversion_deadlock_detect(struct dlm_rsb *rsb, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this, *first = NULL, *self = NULL;

	list_for_each_entry(this, &rsb->res_convertqueue, lkb_statequeue) {
		if (!first)
			first = this;
		if (this == lkb) {
			self = lkb;
			continue;
		}

		if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
			return 1;
	}

	/* if lkb is on the convert queue and is preventing the first
	   from being granted, then there's deadlock and we demote lkb.
	   multiple converting locks may need to do this before the first
	   converting lock can be granted. */

	if (self && self != first) {
		if (!modes_compat(lkb, first) &&
		    !queue_conflict(&rsb->res_grantqueue, first))
			return 1;
	}

	return 0;
}

/*
 * Return 1 if the lock can be granted, 0 otherwise.
 * Also detect and resolve conversion deadlocks.
 *
 * lkb is the lock to be granted
 *
 * now is 1 if the function is being called in the context of the
 * immediate request, it is 0 if called later, after the lock has been
 * queued.
 *
 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
 */

static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);

	/*
	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
	 * a new request for a NL mode lock being blocked.
	 *
	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
	 * request, then it would be granted.  In essence, the use of this flag
	 * tells the Lock Manager to expedite this request by not considering
	 * what may be in the CONVERTING or WAITING queues...  As of this
	 * writing, the EXPEDITE flag can be used only with new requests for NL
	 * mode locks.  This flag is not valid for conversion requests.
	 *
	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
	 * conversion or used with a non-NL requested mode.  We also know an
	 * EXPEDITE request is always granted immediately, so now must always
	 * be 1.  The full condition to grant an expedite request: (now &&
	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
	 * therefore be shortened to just checking the flag.
	 */

	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
		return 1;

	/*
	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
	 * added to the remaining conditions.
	 */

	if (queue_conflict(&r->res_grantqueue, lkb))
		goto out;

	/*
	 * 6-3: By default, a conversion request is immediately granted if the
	 * requested mode is compatible with the modes of all other granted
	 * locks
	 */

	if (queue_conflict(&r->res_convertqueue, lkb))
		goto out;

	/*
	 * 6-5: But the default algorithm for deciding whether to grant or
	 * queue conversion requests does not by itself guarantee that such
	 * requests are serviced on a "first come first serve" basis.  This,
	 * in turn, can lead to a phenomenon known as "indefinite
	 * postponement".
	 *
	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
	 * the system service employed to request a lock conversion.  This flag
	 * forces certain conversion requests to be queued, even if they are
	 * compatible with the granted modes of other locks on the same
	 * resource.  Thus, the use of this flag results in conversion requests
	 * being ordered on a "first come first serve" basis.
	 *
	 * DCT: This condition is all about new conversions being able to occur
	 * "in place" while the lock remains on the granted queue (assuming
	 * nothing else conflicts.)  IOW if QUECVT isn't set, a conversion
	 * doesn't _have_ to go onto the convert queue where it's processed in
	 * order.  The "now" variable is necessary to distinguish converts
	 * being received and processed for the first time now, because once a
	 * convert is moved to the conversion queue the condition below applies
	 * requiring fifo granting.
	 */

	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
		return 1;

	/*
	 * The NOORDER flag is set to avoid the standard vms rules on grant
	 * order.
	 */

	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
		return 1;

	/*
	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
	 * granted until all other conversion requests ahead of it are granted
	 * and/or canceled.
	 */

	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
		return 1;

	/*
	 * 6-4: By default, a new request is immediately granted only if all
	 * three of the following conditions are satisfied when the request is
	 * issued:
	 * - The queue of ungranted conversion requests for the resource is
	 *   empty.
	 * - The queue of ungranted new requests for the resource is empty.
	 * - The mode of the new request is compatible with the most
	 *   restrictive mode of all granted locks on the resource.
	 */

	if (now && !conv && list_empty(&r->res_convertqueue) &&
	    list_empty(&r->res_waitqueue))
		return 1;

	/*
	 * 6-4: Once a lock request is in the queue of ungranted new requests,
	 * it cannot be granted until the queue of ungranted conversion
	 * requests is empty, all ungranted new requests ahead of it are
	 * granted and/or canceled, and it is compatible with the granted mode
	 * of the most restrictive lock granted on the resource.
	 */

	if (!now && !conv && list_empty(&r->res_convertqueue) &&
	    first_in_list(lkb, &r->res_waitqueue))
		return 1;

 out:
	/*
	 * The following, enabled by CONVDEADLK, departs from VMS.
	 */

	if (conv && (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) &&
	    conversion_deadlock_detect(r, lkb)) {
		lkb->lkb_grmode = DLM_LOCK_NL;
		lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
	}

	return 0;
}

/*
 * The ALTPR and ALTCW flags aren't traditional lock manager flags, but are a
 * simple way to provide a big optimization to applications that can use them.
 */

static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	uint32_t flags = lkb->lkb_exflags;
	int rv;
	int8_t alt = 0, rqmode = lkb->lkb_rqmode;

	rv = _can_be_granted(r, lkb, now);
	if (rv)
		goto out;

	if (lkb->lkb_sbflags & DLM_SBF_DEMOTED)
		goto out;

	if (rqmode != DLM_LOCK_PR && flags & DLM_LKF_ALTPR)
		alt = DLM_LOCK_PR;
	else if (rqmode != DLM_LOCK_CW && flags & DLM_LKF_ALTCW)
		alt = DLM_LOCK_CW;

	if (alt) {
		lkb->lkb_rqmode = alt;
		rv = _can_be_granted(r, lkb, now);
		if (rv)
			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
		else
			lkb->lkb_rqmode = rqmode;
	}
 out:
	return rv;
}
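
/* Example of the alternate-mode fallback: an EX request carrying
   DLM_LKF_ALTPR that cannot be granted is retried as PR; if PR can be
   granted immediately, the lock is granted in that mode and the caller
   sees DLM_SBF_ALTMODE set in lksb->sb_flags, indicating the granted
   mode differs from the one requested. */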

static int grant_pending_convert(struct dlm_rsb *r, int high)
{
	struct dlm_lkb *lkb, *s;
	int hi, demoted, quit, grant_restart, demote_restart;

	quit = 0;
 restart:
	grant_restart = 0;
	demote_restart = 0;
	hi = DLM_LOCK_IV;

	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
		demoted = is_demoted(lkb);
		if (can_be_granted(r, lkb, 0)) {
			grant_lock_pending(r, lkb);
			grant_restart = 1;
		} else {
			hi = max_t(int, lkb->lkb_rqmode, hi);
			if (!demoted && is_demoted(lkb))
				demote_restart = 1;
		}
	}

	if (grant_restart)
		goto restart;
	if (demote_restart && !quit) {
		quit = 1;
		goto restart;
	}

	return max_t(int, high, hi);
}

static int grant_pending_wait(struct dlm_rsb *r, int high)
{
	struct dlm_lkb *lkb, *s;

	list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
		if (can_be_granted(r, lkb, 0))
			grant_lock_pending(r, lkb);
		else
			high = max_t(int, lkb->lkb_rqmode, high);
	}

	return high;
}

static void grant_pending_locks(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *s;
	int high = DLM_LOCK_IV;

	DLM_ASSERT(is_master(r), dlm_dump_rsb(r););

	high = grant_pending_convert(r, high);
	high = grant_pending_wait(r, high);

	if (high == DLM_LOCK_IV)
		return;

	/*
	 * If there are locks left on the wait/convert queue then send blocking
	 * ASTs to granted locks based on the largest requested mode (high)
	 * found above.  FIXME: highbast < high comparison not valid for PR/CW.
	 */

	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
		    !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
			queue_bast(r, lkb, high);
			lkb->lkb_highbast = high;
		}
	}
}

static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
			    struct dlm_lkb *lkb)
{
	struct dlm_lkb *gr;

	list_for_each_entry(gr, head, lkb_statequeue) {
		if (gr->lkb_bastaddr &&
		    gr->lkb_highbast < lkb->lkb_rqmode &&
		    !modes_compat(gr, lkb)) {
			queue_bast(r, gr, lkb->lkb_rqmode);
			gr->lkb_highbast = lkb->lkb_rqmode;
		}
	}
}

static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
}

static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
	send_bast_queue(r, &r->res_convertqueue, lkb);
}

/* set_master(r, lkb) -- set the master nodeid of a resource

   The purpose of this function is to set the nodeid field in the given
   lkb using the nodeid field in the given rsb.  If the rsb's nodeid is
   known, it can just be copied to the lkb and the function will return
   0.  If the rsb's nodeid is _not_ known, it needs to be looked up
   before it can be copied to the lkb.

   When the rsb nodeid is being looked up remotely, the initial lkb
   causing the lookup is kept on the ls_waiters list waiting for the
   lookup reply.  Other lkb's waiting for the same rsb lookup are kept
   on the rsb's res_lookup list until the master is verified.

   Return values:
   0: nodeid is set in rsb/lkb and the caller should go ahead and use it
   1: the rsb master is not available and the lkb has been placed on
      a wait queue
*/

static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = r->res_ls;
	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = lkb->lkb_id;
		lkb->lkb_nodeid = r->res_nodeid;
		return 0;
	}

	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
		return 1;
	}

	if (r->res_nodeid == 0) {
		lkb->lkb_nodeid = 0;
		return 0;
	}

	if (r->res_nodeid > 0) {
		lkb->lkb_nodeid = r->res_nodeid;
		return 0;
	}

	DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););

	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid != our_nodeid) {
		r->res_first_lkid = lkb->lkb_id;
		send_lookup(r, lkb);
		return 1;
	}

	for (;;) {
		/* It's possible for dlm_scand to remove an old rsb for
		   this same resource from the toss list, us to create
		   a new one, look up the master locally, and find it
		   already exists just before dlm_scand does the
		   dir_remove() on the previous rsb. */

		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
				       r->res_length, &ret_nodeid);
		if (!error)
			break;
		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
		schedule();
	}

	if (ret_nodeid == our_nodeid) {
		r->res_first_lkid = 0;
		r->res_nodeid = 0;
		lkb->lkb_nodeid = 0;
	} else {
		r->res_first_lkid = lkb->lkb_id;
		r->res_nodeid = ret_nodeid;
		lkb->lkb_nodeid = ret_nodeid;
	}
	return 0;
}
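
/* Example of how callers use set_master(): _request_lock() calls it
   before do_request()/send_request(); a return of 0 means lkb_nodeid is
   now valid (0 for "we are master", otherwise the master's nodeid) and
   the operation proceeds immediately, while a return of 1 means a
   remote lookup was initiated and the operation resumes when the lookup
   reply (or process_lookup_list() below) retries _request_lock(). */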
1631
1632static void process_lookup_list(struct dlm_rsb *r)
1633{
1634 struct dlm_lkb *lkb, *safe;
1635
1636 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
David Teiglandef0c2bb2007-03-28 09:56:46 -05001637 list_del_init(&lkb->lkb_rsb_lookup);
David Teiglande7fd4172006-01-18 09:30:29 +00001638 _request_lock(r, lkb);
1639 schedule();
1640 }
1641}
1642
1643/* confirm_master -- confirm (or deny) an rsb's master nodeid */
1644
1645static void confirm_master(struct dlm_rsb *r, int error)
1646{
1647 struct dlm_lkb *lkb;
1648
1649 if (!r->res_first_lkid)
1650 return;
1651
1652 switch (error) {
1653 case 0:
1654 case -EINPROGRESS:
1655 r->res_first_lkid = 0;
1656 process_lookup_list(r);
1657 break;
1658
1659 case -EAGAIN:
1660 /* the remote master didn't queue our NOQUEUE request;
1661 make a waiting lkb the first_lkid */
1662
1663 r->res_first_lkid = 0;
1664
1665 if (!list_empty(&r->res_lookup)) {
1666 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1667 lkb_rsb_lookup);
David Teiglandef0c2bb2007-03-28 09:56:46 -05001668 list_del_init(&lkb->lkb_rsb_lookup);
David Teiglande7fd4172006-01-18 09:30:29 +00001669 r->res_first_lkid = lkb->lkb_id;
1670 _request_lock(r, lkb);
1671 } else
1672 r->res_nodeid = -1;
1673 break;
1674
1675 default:
1676 log_error(r->res_ls, "confirm_master unknown error %d", error);
1677 }
1678}

static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
			 int namelen, uint32_t parent_lkid, void *ast,
			 void *astarg, void *bast, struct dlm_args *args)
{
	int rv = -EINVAL;

	/* check for invalid arg usage */

	if (mode < 0 || mode > DLM_LOCK_EX)
		goto out;

	if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
		goto out;

	if (flags & DLM_LKF_CANCEL)
		goto out;

	if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
		goto out;

	if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
		goto out;

	if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
		goto out;

	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
		goto out;

	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
		goto out;

	if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
		goto out;

	if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
		goto out;

	if (!ast || !lksb)
		goto out;

	if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
		goto out;

	/* parent/child locks not yet supported */
	if (parent_lkid)
		goto out;

	if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
		goto out;

	/* these args will be copied to the lkb in validate_lock_args,
	   it cannot be done now because when converting locks, fields in
	   an active lkb cannot be modified before locking the rsb */

	args->flags = flags;
	args->astaddr = ast;
	args->astparam = (long) astarg;
	args->bastaddr = bast;
	args->mode = mode;
	args->lksb = lksb;
	rv = 0;
 out:
	return rv;
}
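
/*
 * Summary of the checks above (derived from the code, for reference):
 *
 *	CANCEL			never valid in dlm_lock()
 *	QUECVT, CONVDEADLK	require CONVERT
 *	CONVDEADLK		excludes NOQUEUE
 *	EXPEDITE		excludes CONVERT/QUECVT/NOQUEUE and
 *				requires mode == DLM_LOCK_NL
 *	VALBLK			requires lksb->sb_lvbptr
 *	CONVERT			requires an existing lksb->sb_lkid
 */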

static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
{
	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
		      DLM_LKF_FORCEUNLOCK))
		return -EINVAL;

	if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
		return -EINVAL;

	args->flags = flags;
	args->astparam = (long) astarg;
	return 0;
}

static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
			      struct dlm_args *args)
{
	int rv = -EINVAL;

	if (args->flags & DLM_LKF_CONVERT) {
		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
			goto out;

		if (args->flags & DLM_LKF_QUECVT &&
		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
			goto out;

		rv = -EBUSY;
		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
			goto out;

		if (lkb->lkb_wait_type)
			goto out;

		if (is_overlap(lkb))
			goto out;
	}

	lkb->lkb_exflags = args->flags;
	lkb->lkb_sbflags = 0;
	lkb->lkb_astaddr = args->astaddr;
	lkb->lkb_astparam = args->astparam;
	lkb->lkb_bastaddr = args->bastaddr;
	lkb->lkb_rqmode = args->mode;
	lkb->lkb_lksb = args->lksb;
	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
	lkb->lkb_ownpid = (int) current->pid;
	rv = 0;
 out:
	return rv;
}

/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
   for success */

/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
   because there may be a lookup in progress and it's valid to do a
   cancel or force-unlock on it */

static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = -EINVAL;

	if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
		log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
		dlm_print_lkb(lkb);
		goto out;
	}

	/* an lkb may still exist even though the lock is EOL'ed due to a
	   cancel, unlock or failed noqueue request; an app can't use these
	   locks; return same error as if the lkid had not been found at all */

	if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
		log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
		rv = -ENOENT;
		goto out;
	}

	/* an lkb may be waiting for an rsb lookup to complete where the
	   lookup was initiated by another lock */

	if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
		if (!list_empty(&lkb->lkb_rsb_lookup)) {
			log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
			list_del_init(&lkb->lkb_rsb_lookup);
			queue_cast(lkb->lkb_resource, lkb,
				   args->flags & DLM_LKF_CANCEL ?
				   -DLM_ECANCEL : -DLM_EUNLOCK);
			unhold_lkb(lkb); /* undoes create_lkb() */
			rv = -EBUSY;
			goto out;
		}
	}

	/* cancel not allowed with another cancel/unlock in progress */

	if (args->flags & DLM_LKF_CANCEL) {
		if (lkb->lkb_exflags & DLM_LKF_CANCEL)
			goto out;

		if (is_overlap(lkb))
			goto out;

		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			rv = -EBUSY;
			goto out;
		}

		switch (lkb->lkb_wait_type) {
		case DLM_MSG_LOOKUP:
		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			rv = -EBUSY;
			goto out;
		case DLM_MSG_UNLOCK:
		case DLM_MSG_CANCEL:
			goto out;
		}
		/* add_to_waiters() will set OVERLAP_CANCEL */
		goto out_ok;
	}

	/* do we need to allow a force-unlock if there's a normal unlock
	   already in progress?  in what conditions could the normal unlock
	   fail such that we'd want to send a force-unlock to be sure? */

	if (args->flags & DLM_LKF_FORCEUNLOCK) {
		if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
			goto out;

		if (is_overlap_unlock(lkb))
			goto out;

		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			rv = -EBUSY;
			goto out;
		}

		switch (lkb->lkb_wait_type) {
		case DLM_MSG_LOOKUP:
		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			rv = -EBUSY;
			goto out;
		case DLM_MSG_UNLOCK:
			goto out;
		}
		/* add_to_waiters() will set OVERLAP_UNLOCK */
		goto out_ok;
	}

	/* normal unlock not allowed if there's any op in progress */
	rv = -EBUSY;
	if (lkb->lkb_wait_type || lkb->lkb_wait_count)
		goto out;

 out_ok:
	/* an overlapping op shouldn't blow away exflags from other op */
	lkb->lkb_exflags |= args->flags;
	lkb->lkb_sbflags = 0;
	lkb->lkb_astparam = args->astparam;
	rv = 0;
 out:
	if (rv)
		log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
			  lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
			  args->flags, lkb->lkb_wait_type,
			  lkb->lkb_resource->res_name);
	return rv;
}
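
/*
 * Illustrative timeline (an assumed scenario, not from the original
 * comments) for the OVERLAP_CANCEL path above: a request is in flight
 * when the app cancels it, so the cancel can't be sent yet and is
 * recorded as an overlap to be resolved when the request reply arrives:
 *
 *	dlm_lock()         -> send_request(),
 *	                      lkb_wait_type = DLM_MSG_REQUEST
 *	dlm_unlock(CANCEL) -> validate_unlock_args() sets
 *	                      DLM_IFL_OVERLAP_CANCEL, returns -EBUSY
 *	                      (which dlm_unlock() maps to 0 for the app)
 *	request reply      -> receive_request_reply() notices the
 *	                      overlap flag and sends the cancel
 */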

/*
 * Four stage 4 varieties:
 * do_request(), do_convert(), do_unlock(), do_cancel()
 * These are called on the master node for the given lock and
 * from the central locking logic.
 */

static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error = 0;

	if (can_be_granted(r, lkb, 1)) {
		grant_lock(r, lkb);
		queue_cast(r, lkb, 0);
		goto out;
	}

	if (can_be_queued(lkb)) {
		error = -EINPROGRESS;
		add_lkb(r, lkb, DLM_LKSTS_WAITING);
		send_blocking_asts(r, lkb);
		goto out;
	}

	error = -EAGAIN;
	if (force_blocking_asts(lkb))
		send_blocking_asts_all(r, lkb);
	queue_cast(r, lkb, -EAGAIN);

 out:
	return error;
}
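
/*
 * The three do_request() outcomes, summarized from the code above:
 *
 *	granted now              -> return 0,            cast 0 to the app
 *	queued (LKSTS_WAITING)   -> return -EINPROGRESS, cast deferred
 *	                            until a later grant
 *	cannot queue (NOQUEUE)   -> return -EAGAIN,      cast -EAGAIN
 *
 * do_convert() below follows the same pattern.
 */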

static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error = 0;

	/* changing an existing lock may allow others to be granted */

	if (can_be_granted(r, lkb, 1)) {
		grant_lock(r, lkb);
		queue_cast(r, lkb, 0);
		grant_pending_locks(r);
		goto out;
	}

	if (can_be_queued(lkb)) {
		if (is_demoted(lkb))
			grant_pending_locks(r);
		error = -EINPROGRESS;
		del_lkb(r, lkb);
		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
		send_blocking_asts(r, lkb);
		goto out;
	}

	error = -EAGAIN;
	if (force_blocking_asts(lkb))
		send_blocking_asts_all(r, lkb);
	queue_cast(r, lkb, -EAGAIN);

 out:
	return error;
}

static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	remove_lock(r, lkb);
	queue_cast(r, lkb, -DLM_EUNLOCK);
	grant_pending_locks(r);
	return -DLM_EUNLOCK;
}

/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */

static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	error = revert_lock(r, lkb);
	if (error) {
		queue_cast(r, lkb, -DLM_ECANCEL);
		grant_pending_locks(r);
		return -DLM_ECANCEL;
	}
	return 0;
}

/*
 * Four stage 3 varieties:
 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
 */

/* add a new lkb to a possibly new rsb, called by requesting process */

static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	/* set_master: sets lkb nodeid from r */

	error = set_master(r, lkb);
	if (error < 0)
		goto out;
	if (error) {
		error = 0;
		goto out;
	}

	if (is_remote(r))
		/* receive_request() calls do_request() on remote node */
		error = send_request(r, lkb);
	else
		error = do_request(r, lkb);
 out:
	return error;
}

/* change some property of an existing lkb, e.g. mode */

static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	if (is_remote(r))
		/* receive_convert() calls do_convert() on remote node */
		error = send_convert(r, lkb);
	else
		error = do_convert(r, lkb);

	return error;
}

/* remove an existing lkb from the granted queue */

static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	if (is_remote(r))
		/* receive_unlock() calls do_unlock() on remote node */
		error = send_unlock(r, lkb);
	else
		error = do_unlock(r, lkb);

	return error;
}

/* remove an existing lkb from the convert or wait queue */

static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	if (is_remote(r))
		/* receive_cancel() calls do_cancel() on remote node */
		error = send_cancel(r, lkb);
	else
		error = do_cancel(r, lkb);

	return error;
}
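
/*
 * Stage 3 dispatch in one line (a restatement, not new behavior):
 * each _xxxx_lock() reduces to
 *
 *	error = is_remote(r) ? send_xxxx(r, lkb) : do_xxxx(r, lkb);
 *
 * so the same do_xxxx() logic runs either directly (we are the master)
 * or on the remote master via the matching receive_xxxx().
 */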

/*
 * Four stage 2 varieties:
 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
 */

static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
			int len, struct dlm_args *args)
{
	struct dlm_rsb *r;
	int error;

	error = validate_lock_args(ls, lkb, args);
	if (error)
		goto out;

	error = find_rsb(ls, name, len, R_CREATE, &r);
	if (error)
		goto out;

	lock_rsb(r);

	attach_lkb(r, lkb);
	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;

	error = _request_lock(r, lkb);

	unlock_rsb(r);
	put_rsb(r);

 out:
	return error;
}

static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
			struct dlm_args *args)
{
	struct dlm_rsb *r;
	int error;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_lock_args(ls, lkb, args);
	if (error)
		goto out;

	error = _convert_lock(r, lkb);
 out:
	unlock_rsb(r);
	put_rsb(r);
	return error;
}

static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
		       struct dlm_args *args)
{
	struct dlm_rsb *r;
	int error;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, args);
	if (error)
		goto out;

	error = _unlock_lock(r, lkb);
 out:
	unlock_rsb(r);
	put_rsb(r);
	return error;
}

static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
		       struct dlm_args *args)
{
	struct dlm_rsb *r;
	int error;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, args);
	if (error)
		goto out;

	error = _cancel_lock(r, lkb);
 out:
	unlock_rsb(r);
	put_rsb(r);
	return error;
}

/*
 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
 */

int dlm_lock(dlm_lockspace_t *lockspace,
	     int mode,
	     struct dlm_lksb *lksb,
	     uint32_t flags,
	     void *name,
	     unsigned int namelen,
	     uint32_t parent_lkid,
	     void (*ast) (void *astarg),
	     void *astarg,
	     void (*bast) (void *astarg, int mode))
{
	struct dlm_ls *ls;
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error, convert = flags & DLM_LKF_CONVERT;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	lock_recovery(ls);

	if (convert)
		error = find_lkb(ls, lksb->sb_lkid, &lkb);
	else
		error = create_lkb(ls, &lkb);

	if (error)
		goto out;

	error = set_lock_args(mode, lksb, flags, namelen, parent_lkid, ast,
			      astarg, bast, &args);
	if (error)
		goto out_put;

	if (convert)
		error = convert_lock(ls, lkb, &args);
	else
		error = request_lock(ls, lkb, name, namelen, &args);

	if (error == -EINPROGRESS)
		error = 0;
 out_put:
	if (convert || error)
		__put_lkb(ls, lkb);
	if (error == -EAGAIN)
		error = 0;
 out:
	unlock_recovery(ls);
	dlm_put_lockspace(ls);
	return error;
}
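
/*
 * Example caller, as an illustrative sketch only; the lockspace handle
 * "ls", the completion-based wait and the resource name are
 * assumptions, not part of this file:
 *
 *	static struct dlm_lksb my_lksb;
 *	static struct completion my_done;
 *
 *	static void my_ast(void *astarg)
 *	{
 *		complete(&my_done);
 *	}
 *
 *	error = dlm_lock(ls, DLM_LOCK_EX, &my_lksb, DLM_LKF_NOQUEUE,
 *			 "my_resource", strlen("my_resource"), 0,
 *			 my_ast, &my_lksb, NULL);
 *	if (!error)
 *		wait_for_completion(&my_done);
 *
 * A zero return only means the request was accepted; the outcome
 * (0, -EAGAIN with NOQUEUE, ...) arrives asynchronously in
 * my_lksb.sb_status via the completion ast.
 */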

int dlm_unlock(dlm_lockspace_t *lockspace,
	       uint32_t lkid,
	       uint32_t flags,
	       struct dlm_lksb *lksb,
	       void *astarg)
{
	struct dlm_ls *ls;
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	error = set_unlock_args(flags, astarg, &args);
	if (error)
		goto out_put;

	if (flags & DLM_LKF_CANCEL)
		error = cancel_lock(ls, lkb, &args);
	else
		error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
		error = 0;
	if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	unlock_recovery(ls);
	dlm_put_lockspace(ls);
	return error;
}
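
/*
 * Example caller (illustrative sketch; assumes the lock from the
 * dlm_lock() example above was granted, so my_lksb.sb_lkid is valid):
 *
 *	error = dlm_unlock(ls, my_lksb.sb_lkid, 0, &my_lksb, &my_lksb);
 *
 * As with dlm_lock(), 0 means the operation was accepted; completion
 * (-DLM_EUNLOCK or -DLM_ECANCEL in sb_status) arrives via the ast.
 * Passing DLM_LKF_CANCEL instead cancels an in-progress request.
 */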

/*
 * send/receive routines for remote operations and replies
 *
 * send_args
 * send_common
 * send_request			receive_request
 * send_convert			receive_convert
 * send_unlock			receive_unlock
 * send_cancel			receive_cancel
 * send_grant			receive_grant
 * send_bast			receive_bast
 * send_lookup			receive_lookup
 * send_remove			receive_remove
 *
 * send_common_reply
 * receive_request_reply	send_request_reply
 * receive_convert_reply	send_convert_reply
 * receive_unlock_reply		send_unlock_reply
 * receive_cancel_reply		send_cancel_reply
 * receive_lookup_reply		send_lookup_reply
 */

static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  int to_nodeid, int mstype,
			  struct dlm_message **ms_ret,
			  struct dlm_mhandle **mh_ret)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	char *mb;
	int mb_len = sizeof(struct dlm_message);

	switch (mstype) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_LOOKUP:
	case DLM_MSG_REMOVE:
		mb_len += r->res_length;
		break;
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_GRANT:
		if (lkb && lkb->lkb_lvbptr)
			mb_len += r->res_ls->ls_lvblen;
		break;
	}

	/* get_buffer gives us a message handle (mh) that we need to
	   pass into lowcomms_commit and a message buffer (mb) that we
	   write our data into */

	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
	if (!mh)
		return -ENOBUFS;

	memset(mb, 0, mb_len);

	ms = (struct dlm_message *) mb;

	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	ms->m_header.h_lockspace = r->res_ls->ls_global_id;
	ms->m_header.h_nodeid = dlm_our_nodeid();
	ms->m_header.h_length = mb_len;
	ms->m_header.h_cmd = DLM_MSG;

	ms->m_type = mstype;

	*mh_ret = mh;
	*ms_ret = ms;
	return 0;
}
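
/*
 * Message sizing, worked through (derived from the switch above; the
 * resource name is the hypothetical one from the dlm_lock() example):
 * a DLM_MSG_REQUEST for "my_resource" allocates
 *
 *	mb_len = sizeof(struct dlm_message) + 11
 *
 * while a DLM_MSG_CONVERT carrying an LVB allocates
 * sizeof(struct dlm_message) + ls_lvblen.  The variable part lands in
 * ms->m_extra and is measured on the receive side by
 * receive_extralen().
 */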

/* further lowcomms enhancements or alternate implementations may make
   the return value from this function useful at some point */

static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
{
	dlm_message_out(ms);
	dlm_lowcomms_commit_buffer(mh);
	return 0;
}

static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
		      struct dlm_message *ms)
{
	ms->m_nodeid = lkb->lkb_nodeid;
	ms->m_pid = lkb->lkb_ownpid;
	ms->m_lkid = lkb->lkb_id;
	ms->m_remid = lkb->lkb_remid;
	ms->m_exflags = lkb->lkb_exflags;
	ms->m_sbflags = lkb->lkb_sbflags;
	ms->m_flags = lkb->lkb_flags;
	ms->m_lvbseq = lkb->lkb_lvbseq;
	ms->m_status = lkb->lkb_status;
	ms->m_grmode = lkb->lkb_grmode;
	ms->m_rqmode = lkb->lkb_rqmode;
	ms->m_hash = r->res_hash;

	/* m_result and m_bastmode are set from function args,
	   not from lkb fields */

	if (lkb->lkb_bastaddr)
		ms->m_asts |= AST_BAST;
	if (lkb->lkb_astaddr)
		ms->m_asts |= AST_COMP;

	/* compare with switch in create_message; send_remove() doesn't
	   use send_args() */

	switch (ms->m_type) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_LOOKUP:
		memcpy(ms->m_extra, r->res_name, r->res_length);
		break;
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_GRANT:
		if (!lkb->lkb_lvbptr)
			break;
		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
		break;
	}
}

static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	error = add_to_waiters(lkb, mstype);
	if (error)
		return error;

	to_nodeid = r->res_nodeid;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms);
	if (error)
		goto fail;
	return 0;

 fail:
	remove_from_waiters(lkb, msg_reply_type(mstype));
	return error;
}

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_REQUEST);
}

static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	error = send_common(r, lkb, DLM_MSG_CONVERT);

	/* down conversions go without a reply from the master */
	if (!error && down_conversion(lkb)) {
		remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
		r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
		r->res_ls->ls_stub_ms.m_result = 0;
		r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
	}

	return error;
}

/* FIXME: if this lkb is the only lock we hold on the rsb, then set
   MASTER_UNCERTAIN to force the next request on the rsb to confirm
   that the master is still correct. */

static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_UNLOCK);
}

static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_CANCEL);
}

static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_result = 0;

	error = send_message(mh, ms);
 out:
	return error;
}

static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_bastmode = mode;

	error = send_message(mh, ms);
 out:
	return error;
}

static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	error = add_to_waiters(lkb, DLM_MSG_LOOKUP);
	if (error)
		return error;

	to_nodeid = dlm_dir_nodeid(r);

	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms);
	if (error)
		goto fail;
	return 0;

 fail:
	remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
	return error;
}

static int send_remove(struct dlm_rsb *r)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = dlm_dir_nodeid(r);

	error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
	if (error)
		goto out;

	memcpy(ms->m_extra, r->res_name, r->res_length);
	ms->m_hash = r->res_hash;

	error = send_message(mh, ms);
 out:
	return error;
}

static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
			     int mstype, int rv)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_result = rv;

	error = send_message(mh, ms);
 out:
	return error;
}

static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
}

static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
}

static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
}

static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
}

static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
			     int ret_nodeid, int rv)
{
	struct dlm_rsb *r = &ls->ls_stub_rsb;
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error, nodeid = ms_in->m_header.h_nodeid;

	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
	if (error)
		goto out;

	ms->m_lkid = ms_in->m_lkid;
	ms->m_result = rv;
	ms->m_nodeid = ret_nodeid;

	error = send_message(mh, ms);
 out:
	return error;
}

/* which args we save from a received message depends heavily on the type
   of message, unlike the send side where we can safely send everything about
   the lkb for any type of message */

static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	lkb->lkb_exflags = ms->m_exflags;
	lkb->lkb_sbflags = ms->m_sbflags;
	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
			 (ms->m_flags & 0x0000FFFF);
}

static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	lkb->lkb_sbflags = ms->m_sbflags;
	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
			 (ms->m_flags & 0x0000FFFF);
}

static int receive_extralen(struct dlm_message *ms)
{
	return (ms->m_header.h_length - sizeof(struct dlm_message));
}
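
/*
 * A note on the masking above (an inference from the flag layout, not
 * an original comment): the low 16 bits of lkb_flags travel with the
 * message and are overwritten from the sender's copy, while the high
 * 16 bits (internal flags such as DLM_IFL_MSTCPY and DLM_IFL_RESEND)
 * are node-local state and are preserved:
 *
 *	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
 *			 (ms->m_flags & 0x0000FFFF);
 */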

static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
		       struct dlm_message *ms)
{
	int len;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		if (!lkb->lkb_lvbptr)
			lkb->lkb_lvbptr = allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		len = receive_extralen(ms);
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
	}
	return 0;
}

static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	lkb->lkb_nodeid = ms->m_header.h_nodeid;
	lkb->lkb_ownpid = ms->m_pid;
	lkb->lkb_remid = ms->m_lkid;
	lkb->lkb_grmode = DLM_LOCK_IV;
	lkb->lkb_rqmode = ms->m_rqmode;
	lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
	lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);

	DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		/* lkb was just created so there won't be an lvb yet */
		lkb->lkb_lvbptr = allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
	}

	return 0;
}

static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
		log_error(ls, "convert_args nodeid %d %d lkid %x %x",
			  lkb->lkb_nodeid, ms->m_header.h_nodeid,
			  lkb->lkb_id, lkb->lkb_remid);
		return -EINVAL;
	}

	if (!is_master_copy(lkb))
		return -EINVAL;

	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
		return -EBUSY;

	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;

	lkb->lkb_rqmode = ms->m_rqmode;
	lkb->lkb_lvbseq = ms->m_lvbseq;

	return 0;
}

static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
			       struct dlm_message *ms)
{
	if (!is_master_copy(lkb))
		return -EINVAL;
	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;
	return 0;
}

/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
   uses to send a reply and that the remote end uses to process the reply. */

static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
	lkb->lkb_nodeid = ms->m_header.h_nodeid;
	lkb->lkb_remid = ms->m_lkid;
}

static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, namelen;

	error = create_lkb(ls, &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	error = receive_request_args(ls, lkb, ms);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	namelen = receive_extralen(ms);

	error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	lock_rsb(r);

	attach_lkb(r, lkb);
	error = do_request(r, lkb);
	send_request_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);

	if (error == -EINPROGRESS)
		error = 0;
	if (error)
		dlm_put_lkb(lkb);
	return;

 fail:
	setup_stub_lkb(ls, ms);
	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}

static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, reply = 1;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags(lkb, ms);
	error = receive_convert_args(ls, lkb, ms);
	if (error)
		goto out;
	reply = !down_conversion(lkb);

	error = do_convert(r, lkb);
 out:
	if (reply)
		send_convert_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	setup_stub_lkb(ls, ms);
	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}

static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags(lkb, ms);
	error = receive_unlock_args(ls, lkb, ms);
	if (error)
		goto out;

	error = do_unlock(r, lkb);
 out:
	send_unlock_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	setup_stub_lkb(ls, ms);
	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}

static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = do_cancel(r, lkb);
	send_cancel_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	setup_stub_lkb(ls, ms);
	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}

static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_grant no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags_reply(lkb, ms);
	grant_lock_pc(r, lkb, ms);
	queue_cast(r, lkb, 0);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}

static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_bast no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	queue_bast(r, lkb, ms->m_bastmode);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}

static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
{
	int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;

	from_nodeid = ms->m_header.h_nodeid;
	our_nodeid = dlm_our_nodeid();

	len = receive_extralen(ms);

	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "lookup dir_nodeid %d from %d",
			  dir_nodeid, from_nodeid);
		error = -EINVAL;
		ret_nodeid = -1;
		goto out;
	}

	error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);

	/* Optimization: we're master so treat lookup as a request */
	if (!error && ret_nodeid == our_nodeid) {
		receive_request(ls, ms);
		return;
	}
 out:
	send_lookup_reply(ls, ms, ret_nodeid, error);
}

static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
{
	int len, dir_nodeid, from_nodeid;

	from_nodeid = ms->m_header.h_nodeid;

	len = receive_extralen(ms);

	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
	if (dir_nodeid != dlm_our_nodeid()) {
		log_error(ls, "remove dir entry dir_nodeid %d from %d",
			  dir_nodeid, from_nodeid);
		return;
	}

	dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
}

static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, mstype, result;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_request_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	mstype = lkb->lkb_wait_type;
	error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
	if (error)
		goto out;

	/* Optimization: the dir node was also the master, so it took our
	   lookup as a request and sent request reply instead of lookup reply */
	if (mstype == DLM_MSG_LOOKUP) {
		r->res_nodeid = ms->m_header.h_nodeid;
		lkb->lkb_nodeid = r->res_nodeid;
	}

	/* this is the value returned from do_request() on the master */
	result = ms->m_result;

	switch (result) {
	case -EAGAIN:
		/* request would block (be queued) on remote master */
		queue_cast(r, lkb, -EAGAIN);
		confirm_master(r, -EAGAIN);
		unhold_lkb(lkb); /* undoes create_lkb() */
		break;

	case -EINPROGRESS:
	case 0:
		/* request was queued or granted on remote master */
		receive_flags_reply(lkb, ms);
		lkb->lkb_remid = ms->m_lkid;
		if (result)
			add_lkb(r, lkb, DLM_LKSTS_WAITING);
		else {
			grant_lock_pc(r, lkb, ms);
			queue_cast(r, lkb, 0);
		}
		confirm_master(r, result);
		break;

	case -EBADR:
	case -ENOTBLK:
		/* find_rsb failed to find rsb or rsb wasn't master */
		log_debug(ls, "receive_request_reply %x %x master diff %d %d",
			  lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
		r->res_nodeid = -1;
		lkb->lkb_nodeid = -1;

		if (is_overlap(lkb)) {
			/* we'll ignore error in cancel/unlock reply */
			queue_cast_overlap(r, lkb);
			unhold_lkb(lkb); /* undoes create_lkb() */
		} else
			_request_lock(r, lkb);
		break;

	default:
		log_error(ls, "receive_request_reply %x error %d",
			  lkb->lkb_id, result);
	}

	if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
		log_debug(ls, "receive_request_reply %x result %d unlock",
			  lkb->lkb_id, result);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		send_unlock(r, lkb);
	} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
		log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		send_cancel(r, lkb);
	} else {
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
	}
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}
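
/*
 * How the overlap flags set in validate_unlock_args() are resolved
 * above (a summary of the final branch, not new behavior):
 *
 *	request granted or queued + OVERLAP_UNLOCK -> send_unlock() now
 *	request queued            + OVERLAP_CANCEL -> send_cancel() now
 *	anything else                              -> clear both flags;
 *	                                              the op is moot
 */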

static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms)
{
	/* this is the value returned from do_convert() on the master */
	switch (ms->m_result) {
	case -EAGAIN:
		/* convert would block (be queued) on remote master */
		queue_cast(r, lkb, -EAGAIN);
		break;

	case -EINPROGRESS:
		/* convert was queued on remote master */
		del_lkb(r, lkb);
		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
		break;

	case 0:
		/* convert was granted on remote master */
		receive_flags_reply(lkb, ms);
		grant_lock_pc(r, lkb, ms);
		queue_cast(r, lkb, 0);
		break;

	default:
		log_error(r->res_ls, "receive_convert_reply %x error %d",
			  lkb->lkb_id, ms->m_result);
	}
}

static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	__receive_convert_reply(r, lkb, ms);
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_convert_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	_receive_convert_reply(lkb, ms);
	dlm_put_lkb(lkb);
}

static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	/* this is the value returned from do_unlock() on the master */

	switch (ms->m_result) {
	case -DLM_EUNLOCK:
		receive_flags_reply(lkb, ms);
		remove_lock_pc(r, lkb);
		queue_cast(r, lkb, -DLM_EUNLOCK);
		break;
	case -ENOENT:
		break;
	default:
		log_error(r->res_ls, "receive_unlock_reply %x error %d",
			  lkb->lkb_id, ms->m_result);
	}
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_unlock_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	_receive_unlock_reply(lkb, ms);
	dlm_put_lkb(lkb);
}

static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error;

	hold_rsb(r);
	lock_rsb(r);

	/* stub reply can happen with waiters_mutex held */
	error = remove_from_waiters_ms(lkb, ms);
	if (error)
		goto out;

	/* this is the value returned from do_cancel() on the master */

	switch (ms->m_result) {
	case -DLM_ECANCEL:
		receive_flags_reply(lkb, ms);
		revert_lock_pc(r, lkb);
		if (ms->m_result)
			queue_cast(r, lkb, -DLM_ECANCEL);
		break;
	case 0:
		break;
	default:
		log_error(r->res_ls, "receive_cancel_reply %x error %d",
			  lkb->lkb_id, ms->m_result);
	}
 out:
	unlock_rsb(r);
	put_rsb(r);
}

static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_cancel_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	_receive_cancel_reply(lkb, ms);
	dlm_put_lkb(lkb);
}

static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, ret_nodeid;

	error = find_lkb(ls, ms->m_lkid, &lkb);
	if (error) {
		log_error(ls, "receive_lookup_reply no lkb");
		return;
	}

	/* ms->m_result is the value returned by dlm_dir_lookup on dir node
	   FIXME: will a non-zero error ever be returned? */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
	if (error)
		goto out;

	ret_nodeid = ms->m_nodeid;
	if (ret_nodeid == dlm_our_nodeid()) {
		r->res_nodeid = 0;
		ret_nodeid = 0;
		r->res_first_lkid = 0;
	} else {
		/* set_master() will copy res_nodeid to lkb_nodeid */
		r->res_nodeid = ret_nodeid;
	}

	if (is_overlap(lkb)) {
		log_debug(ls, "receive_lookup_reply %x unlock %x",
			  lkb->lkb_id, lkb->lkb_flags);
		queue_cast_overlap(r, lkb);
		unhold_lkb(lkb); /* undoes create_lkb() */
		goto out_list;
	}

	_request_lock(r, lkb);

 out_list:
	if (!ret_nodeid)
		process_lookup_list(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}

int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
{
	struct dlm_message *ms = (struct dlm_message *) hd;
	struct dlm_ls *ls;
	int error = 0;

	if (!recovery)
		dlm_message_in(ms);

	ls = dlm_find_lockspace_global(hd->h_lockspace);
	if (!ls) {
		log_print("drop message %d from %d for unknown lockspace %d",
			  ms->m_type, nodeid, hd->h_lockspace);
		return -EINVAL;
	}

	/* recovery may have just ended leaving a bunch of backed-up requests
	   in the requestqueue; wait while dlm_recoverd clears them */

	if (!recovery)
		dlm_wait_requestqueue(ls);

	/* recovery may have just started while there were a bunch of
	   in-flight requests -- save them in requestqueue to be processed
	   after recovery.  we can't let dlm_recvd block on the recovery
	   lock.  if dlm_recoverd is calling this function to clear the
	   requestqueue, it needs to be interrupted (-EINTR) if another
	   recovery operation is starting. */

	while (1) {
		if (dlm_locking_stopped(ls)) {
			if (recovery) {
				error = -EINTR;
				goto out;
			}
			error = dlm_add_requestqueue(ls, nodeid, hd);
			if (error == -EAGAIN)
				continue;
			else {
				error = -EINTR;
				goto out;
			}
		}

		if (lock_recovery_try(ls))
			break;
		schedule();
	}

	switch (ms->m_type) {

	/* messages sent to a master node */

	case DLM_MSG_REQUEST:
		receive_request(ls, ms);
		break;

	case DLM_MSG_CONVERT:
		receive_convert(ls, ms);
		break;

	case DLM_MSG_UNLOCK:
		receive_unlock(ls, ms);
		break;

	case DLM_MSG_CANCEL:
		receive_cancel(ls, ms);
		break;

	/* messages sent from a master node (replies to above) */

	case DLM_MSG_REQUEST_REPLY:
		receive_request_reply(ls, ms);
		break;

	case DLM_MSG_CONVERT_REPLY:
		receive_convert_reply(ls, ms);
		break;

	case DLM_MSG_UNLOCK_REPLY:
		receive_unlock_reply(ls, ms);
		break;

	case DLM_MSG_CANCEL_REPLY:
		receive_cancel_reply(ls, ms);
		break;

	/* messages sent from a master node (only two types of async msg) */

	case DLM_MSG_GRANT:
		receive_grant(ls, ms);
		break;

	case DLM_MSG_BAST:
		receive_bast(ls, ms);
		break;

	/* messages sent to a dir node */

	case DLM_MSG_LOOKUP:
		receive_lookup(ls, ms);
		break;

	case DLM_MSG_REMOVE:
		receive_remove(ls, ms);
		break;

	/* messages sent from a dir node (remove has no reply) */

	case DLM_MSG_LOOKUP_REPLY:
		receive_lookup_reply(ls, ms);
		break;

	default:
		log_error(ls, "unknown message type %d", ms->m_type);
	}

	unlock_recovery(ls);
 out:
	dlm_put_lockspace(ls);
	dlm_astd_wake();
	return error;
}


/*
 * Recovery related
 */

static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	if (middle_conversion(lkb)) {
		hold_lkb(lkb);
		ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
		ls->ls_stub_ms.m_result = -EINPROGRESS;
		ls->ls_stub_ms.m_flags = lkb->lkb_flags;
		_receive_convert_reply(lkb, &ls->ls_stub_ms);

		/* Same special case as in receive_rcom_lock_args() */
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
		unhold_lkb(lkb);

	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
		lkb->lkb_flags |= DLM_IFL_RESEND;
	}

	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
	   conversions are async; there's no reply from the remote master */
}

/* A waiting lkb needs recovery if the master node has failed, or
   the master node is changing (only when no directory is used) */

static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	if (dlm_is_removed(ls, lkb->lkb_nodeid))
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
		return 1;

	return 0;
}

/* Recovery for locks that are waiting for replies from nodes that are now
   gone.  We can just complete unlocks and cancels by faking a reply from the
   dead node.  Requests and up-conversions we flag to be resent after
   recovery.  Down-conversions can just be completed with a fake reply like
   unlocks.  Conversions between PR and CW need special attention. */

void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
		log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
			  lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);

		/* all outstanding lookups, regardless of destination will be
		   resent after recovery is done */

		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
			lkb->lkb_flags |= DLM_IFL_RESEND;
			continue;
		}

		if (!waiter_needs_recovery(ls, lkb))
			continue;

		switch (lkb->lkb_wait_type) {

		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_RESEND;
			break;

		case DLM_MSG_CONVERT:
			recover_convert_waiter(ls, lkb);
			break;

		case DLM_MSG_UNLOCK:
			hold_lkb(lkb);
			ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
			ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
			_receive_unlock_reply(lkb, &ls->ls_stub_ms);
			dlm_put_lkb(lkb);
			break;

		case DLM_MSG_CANCEL:
			hold_lkb(lkb);
			ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
			ls->ls_stub_ms.m_result = -DLM_ECANCEL;
			ls->ls_stub_ms.m_flags = lkb->lkb_flags;
			_receive_cancel_reply(lkb, &ls->ls_stub_ms);
			dlm_put_lkb(lkb);
			break;

		default:
			log_error(ls, "invalid lkb wait_type %d",
				  lkb->lkb_wait_type);
		}
		schedule();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}

static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	int found = 0;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			hold_lkb(lkb);
			found = 1;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	if (!found)
		lkb = NULL;
	return lkb;
}

/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being placed
   back on waiters. */

/* We do this after normal locking has been enabled and any saved messages
   (in requestqueue) have been processed.  We should be confident that at
   this point we won't get or process a reply to any of these waiting
   operations.  But, new ops may be coming in on the rsbs/locks here from
   userspace or remotely. */

/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */

int dlm_recover_waiters_post(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error = 0, mstype, err, oc, ou;

	while (1) {
		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "recover_waiters_post aborted");
			error = -EINTR;
			break;
		}

		lkb = find_resend_waiter(ls);
		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		mstype = lkb->lkb_wait_type;
		oc = is_overlap_cancel(lkb);
		ou = is_overlap_unlock(lkb);
		err = 0;

		log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
			  lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);

		/* At this point we assume that we won't get a reply to any
		   previous op or overlap op on this lock.  First, do a big
		   remove_from_waiters() for all previous ops. */

		lkb->lkb_flags &= ~DLM_IFL_RESEND;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_type = 0;
		lkb->lkb_wait_count = 0;
		mutex_lock(&ls->ls_waiters_mutex);
		list_del_init(&lkb->lkb_wait_reply);
		mutex_unlock(&ls->ls_waiters_mutex);
		unhold_lkb(lkb); /* for waiters list */

		if (oc || ou) {
			/* do an unlock or cancel instead of resending */
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
							-DLM_ECANCEL);
				unhold_lkb(lkb); /* undoes create_lkb() */
				break;
			case DLM_MSG_CONVERT:
				if (oc) {
					queue_cast(r, lkb, -DLM_ECANCEL);
				} else {
					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
					_unlock_lock(r, lkb);
				}
				break;
			default:
				err = 1;
			}
		} else {
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				_request_lock(r, lkb);
				if (is_master(r))
					confirm_master(r, 0);
				break;
			case DLM_MSG_CONVERT:
				_convert_lock(r, lkb);
				break;
			default:
				err = 1;
			}
		}

		if (err)
			log_error(ls, "recover_waiters_post %x %d %x %d %d",
				  lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
	}

	return error;
}
3646
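/* Remove from the given rsb queue any lkb's matching the test function,
   dropping the reference that should free each one.  Each purged lkb also
   flags the rsb LOCKS_PURGED so dlm_grant_after_purge() will revisit it. */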
3647static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
3648 int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
3649{
3650 struct dlm_ls *ls = r->res_ls;
3651 struct dlm_lkb *lkb, *safe;
3652
3653 list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
3654 if (test(ls, lkb)) {
David Teigland97a35d12006-05-02 13:34:03 -04003655 rsb_set_flag(r, RSB_LOCKS_PURGED);
David Teiglande7fd4172006-01-18 09:30:29 +00003656 del_lkb(r, lkb);
3657 /* this put should free the lkb */
David Teiglandb3f58d82006-02-28 11:16:37 -05003658 if (!dlm_put_lkb(lkb))
David Teiglande7fd4172006-01-18 09:30:29 +00003659 log_error(ls, "purged lkb not released");
3660 }
3661 }
3662}
3663
3664static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3665{
3666 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
3667}
3668
3669static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3670{
3671 return is_master_copy(lkb);
3672}
3673
3674static void purge_dead_locks(struct dlm_rsb *r)
3675{
3676 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
3677 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
3678 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
3679}
3680
3681void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
3682{
3683 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
3684 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
3685 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
3686}
3687
3688/* Get rid of locks held by nodes that are gone. */
3689
3690int dlm_purge_locks(struct dlm_ls *ls)
3691{
3692 struct dlm_rsb *r;
3693
3694 log_debug(ls, "dlm_purge_locks");
3695
3696 down_write(&ls->ls_root_sem);
3697 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
3698 hold_rsb(r);
3699 lock_rsb(r);
3700 if (is_master(r))
3701 purge_dead_locks(r);
3702 unlock_rsb(r);
3703 unhold_rsb(r);
3704
3705 schedule();
3706 }
3707 up_write(&ls->ls_root_sem);
3708
3709 return 0;
3710}
3711
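/* Scan one rsbtbl bucket for an rsb flagged LOCKS_PURGED; clear the flag
   and return the rsb with a reference held, or NULL if the bucket has none
   left. */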
David Teigland97a35d12006-05-02 13:34:03 -04003712static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
3713{
3714 struct dlm_rsb *r, *r_ret = NULL;
3715
3716 read_lock(&ls->ls_rsbtbl[bucket].lock);
3717 list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
3718 if (!rsb_flag(r, RSB_LOCKS_PURGED))
3719 continue;
3720 hold_rsb(r);
3721 rsb_clear_flag(r, RSB_LOCKS_PURGED);
3722 r_ret = r;
3723 break;
3724 }
3725 read_unlock(&ls->ls_rsbtbl[bucket].lock);
3726 return r_ret;
3727}
3728
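/* After dead locks have been purged, revisit each rsb that purge_queue()
   flagged and, where we are the master, grant any locks the purge may have
   unblocked. */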
3729void dlm_grant_after_purge(struct dlm_ls *ls)
David Teiglande7fd4172006-01-18 09:30:29 +00003730{
3731 struct dlm_rsb *r;
David Teigland2b4e9262006-07-25 13:59:48 -05003732 int bucket = 0;
David Teiglande7fd4172006-01-18 09:30:29 +00003733
David Teigland2b4e9262006-07-25 13:59:48 -05003734 while (1) {
3735 r = find_purged_rsb(ls, bucket);
3736 if (!r) {
3737 if (bucket == ls->ls_rsbtbl_size - 1)
3738 break;
3739 bucket++;
David Teigland97a35d12006-05-02 13:34:03 -04003740 continue;
David Teigland2b4e9262006-07-25 13:59:48 -05003741 }
David Teigland97a35d12006-05-02 13:34:03 -04003742 lock_rsb(r);
3743 if (is_master(r)) {
3744 grant_pending_locks(r);
3745 confirm_master(r, 0);
David Teiglande7fd4172006-01-18 09:30:29 +00003746 }
David Teigland97a35d12006-05-02 13:34:03 -04003747 unlock_rsb(r);
3748 put_rsb(r);
David Teigland2b4e9262006-07-25 13:59:48 -05003749 schedule();
David Teiglande7fd4172006-01-18 09:30:29 +00003750 }
David Teiglande7fd4172006-01-18 09:30:29 +00003751}
3752
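/* Look up a master-copy lkb by the holding node's id and its lock id on
   that node; used to detect locks already rebuilt by an earlier, aborted
   recovery. */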
3753static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
3754 uint32_t remid)
3755{
3756 struct dlm_lkb *lkb;
3757
3758 list_for_each_entry(lkb, head, lkb_statequeue) {
3759 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
3760 return lkb;
3761 }
3762 return NULL;
3763}
3764
3765static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
3766 uint32_t remid)
3767{
3768 struct dlm_lkb *lkb;
3769
3770 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
3771 if (lkb)
3772 return lkb;
3773 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
3774 if (lkb)
3775 return lkb;
3776 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
3777 if (lkb)
3778 return lkb;
3779 return NULL;
3780}
3781
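/* Fill in a new master-copy lkb from the rcom_lock the lock holder sent.
   Only the lower 16 flag bits are copied across, and DLM_IFL_MSTCPY marks
   the lkb as the master's copy of a lock held on another node. */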
3782static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3783 struct dlm_rsb *r, struct dlm_rcom *rc)
3784{
3785 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3786 int lvblen;
3787
3788 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
3789 lkb->lkb_ownpid = rl->rl_ownpid;
3790 lkb->lkb_remid = rl->rl_lkid;
3791 lkb->lkb_exflags = rl->rl_exflags;
3792 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
3793 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3794 lkb->lkb_lvbseq = rl->rl_lvbseq;
3795 lkb->lkb_rqmode = rl->rl_rqmode;
3796 lkb->lkb_grmode = rl->rl_grmode;
3797 /* don't set lkb_status because add_lkb wants to set it itself */
3798
3799 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
3800 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
3801
David Teiglande7fd4172006-01-18 09:30:29 +00003802 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3803 lkb->lkb_lvbptr = allocate_lvb(ls);
3804 if (!lkb->lkb_lvbptr)
3805 return -ENOMEM;
3806 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
3807 sizeof(struct rcom_lock);
3808 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
3809 }
3810
3811 /* Conversions between PR and CW (middle modes) need special handling.
3812 The real granted mode of these converting locks cannot be determined
3813 until all locks have been rebuilt on the rsb (recover_conversion). */
3814
3815 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
3816 rl->rl_status = DLM_LKSTS_CONVERT;
3817 lkb->lkb_grmode = DLM_LOCK_IV;
3818 rsb_set_flag(r, RSB_RECOVER_CONVERT);
3819 }
3820
3821 return 0;
3822}
3823
3824/* This lkb may have been recovered in a previous aborted recovery so we need
3825 to check if the rsb already has an lkb with the given remote nodeid/lkid.
3826 If so we just send back a standard reply. If not, we create a new lkb with
3827 the given values and send back our lkid. We send back our lkid by sending
3828 back the rcom_lock struct we got but with the remid field filled in. */
3829
3830int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3831{
3832 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3833 struct dlm_rsb *r;
3834 struct dlm_lkb *lkb;
3835 int error;
3836
3837 if (rl->rl_parent_lkid) {
3838 error = -EOPNOTSUPP;
3839 goto out;
3840 }
3841
3842 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
3843 if (error)
3844 goto out;
3845
3846 lock_rsb(r);
3847
3848 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
3849 if (lkb) {
3850 error = -EEXIST;
3851 goto out_remid;
3852 }
3853
3854 error = create_lkb(ls, &lkb);
3855 if (error)
3856 goto out_unlock;
3857
3858 error = receive_rcom_lock_args(ls, lkb, r, rc);
3859 if (error) {
David Teiglandb3f58d82006-02-28 11:16:37 -05003860 __put_lkb(ls, lkb);
David Teiglande7fd4172006-01-18 09:30:29 +00003861 goto out_unlock;
3862 }
3863
3864 attach_lkb(r, lkb);
3865 add_lkb(r, lkb, rl->rl_status);
3866 error = 0;
3867
3868 out_remid:
3869 /* this is the new value returned to the lock holder for
3870 saving in its process-copy lkb */
3871 rl->rl_remid = lkb->lkb_id;
3872
3873 out_unlock:
3874 unlock_rsb(r);
3875 put_rsb(r);
3876 out:
3877 if (error)
3878 log_print("recover_master_copy %d %x", error, rl->rl_lkid);
3879 rl->rl_result = error;
3880 return error;
3881}
3882
3883int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3884{
3885 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3886 struct dlm_rsb *r;
3887 struct dlm_lkb *lkb;
3888 int error;
3889
3890 error = find_lkb(ls, rl->rl_lkid, &lkb);
3891 if (error) {
3892 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
3893 return error;
3894 }
3895
3896 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3897
3898 error = rl->rl_result;
3899
3900 r = lkb->lkb_resource;
3901 hold_rsb(r);
3902 lock_rsb(r);
3903
3904 switch (error) {
David Teiglanddc200a82006-12-13 10:36:37 -06003905 case -EBADR:
3906 /* There's a chance the new master received our lock before
3907 dlm_recover_master_reply(); this wouldn't happen if we did
3908 a barrier between recover_masters and recover_locks. */
3909 log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
3910 (unsigned long)r, r->res_name);
3911 dlm_send_rcom_lock(r, lkb);
3912 goto out;
David Teiglande7fd4172006-01-18 09:30:29 +00003913 case -EEXIST:
3914 log_debug(ls, "master copy exists %x", lkb->lkb_id);
3915 /* fall through */
3916 case 0:
3917 lkb->lkb_remid = rl->rl_remid;
3918 break;
3919 default:
3920 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
3921 error, lkb->lkb_id);
3922 }
3923
3924 /* an ack for dlm_recover_locks() which waits for replies from
3925 all the locks it sends to new masters */
3926 dlm_recovered_lock(r);
David Teiglanddc200a82006-12-13 10:36:37 -06003927 out:
David Teiglande7fd4172006-01-18 09:30:29 +00003928 unlock_rsb(r);
3929 put_rsb(r);
David Teiglandb3f58d82006-02-28 11:16:37 -05003930 dlm_put_lkb(lkb);
David Teiglande7fd4172006-01-18 09:30:29 +00003931
3932 return 0;
3933}
3934
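/* The dlm_user_* functions below are the userspace counterparts of
   dlm_lock() and dlm_unlock().  The dlm_user_args (ua) carries the user's
   lksb and ast info, and is attached to the lkb as lkb_astparam. */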
David Teigland597d0ca2006-07-12 16:44:04 -05003935int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
3936 int mode, uint32_t flags, void *name, unsigned int namelen,
3937 uint32_t parent_lkid)
3938{
3939 struct dlm_lkb *lkb;
3940 struct dlm_args args;
3941 int error;
3942
3943 lock_recovery(ls);
3944
3945 error = create_lkb(ls, &lkb);
3946 if (error) {
3947 kfree(ua);
3948 goto out;
3949 }
3950
3951 if (flags & DLM_LKF_VALBLK) {
David Teigland62a0f622007-01-31 13:25:00 -06003952 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
David Teigland597d0ca2006-07-12 16:44:04 -05003953 if (!ua->lksb.sb_lvbptr) {
3954 kfree(ua);
3955 __put_lkb(ls, lkb);
3956 error = -ENOMEM;
3957 goto out;
3958 }
3959 }
3960
3961 /* After ua is attached to lkb it will be freed by free_lkb().
3962 When DLM_IFL_USER is set, the dlm knows that this is a userspace
3963 lock and that lkb_astparam is the dlm_user_args structure. */
3964
3965 error = set_lock_args(mode, &ua->lksb, flags, namelen, parent_lkid,
David Teigland32f105a2006-08-23 16:07:31 -04003966 DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
David Teigland597d0ca2006-07-12 16:44:04 -05003967 lkb->lkb_flags |= DLM_IFL_USER;
3968 ua->old_mode = DLM_LOCK_IV;
3969
3970 if (error) {
3971 __put_lkb(ls, lkb);
3972 goto out;
3973 }
3974
3975 error = request_lock(ls, lkb, name, namelen, &args);
3976
3977 switch (error) {
3978 case 0:
3979 break;
3980 case -EINPROGRESS:
3981 error = 0;
3982 break;
3983 case -EAGAIN:
3984 error = 0;
3985 /* fall through */
3986 default:
3987 __put_lkb(ls, lkb);
3988 goto out;
3989 }
3990
3991 /* add this new lkb to the per-process list of locks */
3992 spin_lock(&ua->proc->locks_spin);
David Teiglandef0c2bb2007-03-28 09:56:46 -05003993 hold_lkb(lkb);
David Teigland597d0ca2006-07-12 16:44:04 -05003994 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
3995 spin_unlock(&ua->proc->locks_spin);
3996 out:
3997 unlock_recovery(ls);
3998 return error;
3999}
4000
4001int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4002 int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
4003{
4004 struct dlm_lkb *lkb;
4005 struct dlm_args args;
4006 struct dlm_user_args *ua;
4007 int error;
4008
4009 lock_recovery(ls);
4010
4011 error = find_lkb(ls, lkid, &lkb);
4012 if (error)
4013 goto out;
4014
4015 /* user can change the params on its lock when it converts it, or
4016 add an lvb that didn't exist before */
4017
4018 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4019
4020 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
David Teigland62a0f622007-01-31 13:25:00 -06004021 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
David Teigland597d0ca2006-07-12 16:44:04 -05004022 if (!ua->lksb.sb_lvbptr) {
4023 error = -ENOMEM;
4024 goto out_put;
4025 }
4026 }
4027 if (lvb_in && ua->lksb.sb_lvbptr)
4028 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4029
4030 ua->castparam = ua_tmp->castparam;
4031 ua->castaddr = ua_tmp->castaddr;
4032 ua->bastparam = ua_tmp->bastparam;
4033 ua->bastaddr = ua_tmp->bastaddr;
Patrick Caulfield10948eb2006-08-23 09:49:31 +01004034 ua->user_lksb = ua_tmp->user_lksb;
David Teigland597d0ca2006-07-12 16:44:04 -05004035 ua->old_mode = lkb->lkb_grmode;
4036
David Teigland32f105a2006-08-23 16:07:31 -04004037 error = set_lock_args(mode, &ua->lksb, flags, 0, 0, DLM_FAKE_USER_AST,
4038 ua, DLM_FAKE_USER_AST, &args);
David Teigland597d0ca2006-07-12 16:44:04 -05004039 if (error)
4040 goto out_put;
4041
4042 error = convert_lock(ls, lkb, &args);
4043
4044 if (error == -EINPROGRESS || error == -EAGAIN)
4045 error = 0;
4046 out_put:
4047 dlm_put_lkb(lkb);
4048 out:
4049 unlock_recovery(ls);
4050 kfree(ua_tmp);
4051 return error;
4052}
4053
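/* Unlock a userspace lock.  On success the lkb is moved to the proc's
   unlocking list, unless dlm_user_add_ast() has already taken it off the
   proc list. */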
4054int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4055 uint32_t flags, uint32_t lkid, char *lvb_in)
4056{
4057 struct dlm_lkb *lkb;
4058 struct dlm_args args;
4059 struct dlm_user_args *ua;
4060 int error;
4061
4062 lock_recovery(ls);
4063
4064 error = find_lkb(ls, lkid, &lkb);
4065 if (error)
4066 goto out;
4067
4068 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4069
4070 if (lvb_in && ua->lksb.sb_lvbptr)
4071 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
4072 ua->castparam = ua_tmp->castparam;
Patrick Caulfieldcc346d52006-08-08 10:34:40 -04004073 ua->user_lksb = ua_tmp->user_lksb;
David Teigland597d0ca2006-07-12 16:44:04 -05004074
4075 error = set_unlock_args(flags, ua, &args);
4076 if (error)
4077 goto out_put;
4078
4079 error = unlock_lock(ls, lkb, &args);
4080
4081 if (error == -DLM_EUNLOCK)
4082 error = 0;
David Teiglandef0c2bb2007-03-28 09:56:46 -05004083 /* from validate_unlock_args() */
4084 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
4085 error = 0;
David Teigland597d0ca2006-07-12 16:44:04 -05004086 if (error)
4087 goto out_put;
4088
4089 spin_lock(&ua->proc->locks_spin);
David Teiglanda1bc86e2007-01-15 10:34:52 -06004090 /* dlm_user_add_ast() may have already taken lkb off the proc list */
4091 if (!list_empty(&lkb->lkb_ownqueue))
4092 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
David Teigland597d0ca2006-07-12 16:44:04 -05004093 spin_unlock(&ua->proc->locks_spin);
David Teigland597d0ca2006-07-12 16:44:04 -05004094 out_put:
4095 dlm_put_lkb(lkb);
4096 out:
4097 unlock_recovery(ls);
David Teiglandef0c2bb2007-03-28 09:56:46 -05004098 kfree(ua_tmp);
David Teigland597d0ca2006-07-12 16:44:04 -05004099 return error;
4100}
4101
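/* Cancel an in-progress userspace request or conversion.  Both -DLM_ECANCEL
   and the -EBUSY case from validate_unlock_args() count as success. */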
4102int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
4103 uint32_t flags, uint32_t lkid)
4104{
4105 struct dlm_lkb *lkb;
4106 struct dlm_args args;
4107 struct dlm_user_args *ua;
4108 int error;
4109
4110 lock_recovery(ls);
4111
4112 error = find_lkb(ls, lkid, &lkb);
4113 if (error)
4114 goto out;
4115
4116 ua = (struct dlm_user_args *)lkb->lkb_astparam;
4117 ua->castparam = ua_tmp->castparam;
Patrick Caulfieldc059f702006-08-23 10:24:03 +01004118 ua->user_lksb = ua_tmp->user_lksb;
David Teigland597d0ca2006-07-12 16:44:04 -05004119
4120 error = set_unlock_args(flags, ua, &args);
4121 if (error)
4122 goto out_put;
4123
4124 error = cancel_lock(ls, lkb, &args);
4125
4126 if (error == -DLM_ECANCEL)
4127 error = 0;
David Teiglandef0c2bb2007-03-28 09:56:46 -05004128 /* from validate_unlock_args() */
4129 if (error == -EBUSY)
4130 error = 0;
David Teigland597d0ca2006-07-12 16:44:04 -05004131 out_put:
4132 dlm_put_lkb(lkb);
4133 out:
4134 unlock_recovery(ls);
David Teiglandef0c2bb2007-03-28 09:56:46 -05004135 kfree(ua_tmp);
David Teigland597d0ca2006-07-12 16:44:04 -05004136 return error;
4137}
4138
David Teiglandef0c2bb2007-03-28 09:56:46 -05004139/* lkb's that are removed from the waiters list by revert are just left on the
4140 orphans list with the granted orphan locks, to be freed by purge */
4141
David Teigland597d0ca2006-07-12 16:44:04 -05004142static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4143{
4144 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
David Teiglandef0c2bb2007-03-28 09:56:46 -05004145 struct dlm_args args;
4146 int error;
David Teigland597d0ca2006-07-12 16:44:04 -05004147
David Teiglandef0c2bb2007-03-28 09:56:46 -05004148 hold_lkb(lkb);
4149 mutex_lock(&ls->ls_orphans_mutex);
4150 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
4151 mutex_unlock(&ls->ls_orphans_mutex);
David Teigland597d0ca2006-07-12 16:44:04 -05004152
David Teiglandef0c2bb2007-03-28 09:56:46 -05004153 set_unlock_args(0, ua, &args);
4154
4155 error = cancel_lock(ls, lkb, &args);
4156 if (error == -DLM_ECANCEL)
4157 error = 0;
4158 return error;
David Teigland597d0ca2006-07-12 16:44:04 -05004159}
4160
4161/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
4162 Regardless of what rsb queue the lock is on, it's removed and freed. */
4163
4164static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
4165{
4166 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
4167 struct dlm_args args;
4168 int error;
4169
David Teigland597d0ca2006-07-12 16:44:04 -05004170 set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);
4171
4172 error = unlock_lock(ls, lkb, &args);
4173 if (error == -DLM_EUNLOCK)
4174 error = 0;
4175 return error;
4176}
4177
David Teiglandef0c2bb2007-03-28 09:56:46 -05004178/* We have to release the clear_proc_locks mutex before calling
4179 unlock_proc_lock() (which takes lock_rsb); otherwise we could deadlock with
4180 a message receive that takes lock_rsb and then calls dlm_user_add_ast() */
4181
4182static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
4183 struct dlm_user_proc *proc)
4184{
4185 struct dlm_lkb *lkb = NULL;
4186
4187 mutex_lock(&ls->ls_clear_proc_locks);
4188 if (list_empty(&proc->locks))
4189 goto out;
4190
4191 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
4192 list_del_init(&lkb->lkb_ownqueue);
4193
4194 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
4195 lkb->lkb_flags |= DLM_IFL_ORPHAN;
4196 else
4197 lkb->lkb_flags |= DLM_IFL_DEAD;
4198 out:
4199 mutex_unlock(&ls->ls_clear_proc_locks);
4200 return lkb;
4201}
4202
David Teigland597d0ca2006-07-12 16:44:04 -05004203/* The ls_clear_proc_locks mutex protects against dlm_user_add_ast(), which
4204 1) references lkb->ua, which we free here, and 2) adds lkbs to proc->asts,
4205 which we clear here. */
4206
4207/* proc CLOSING flag is set so no more device_reads should look at proc->asts
4208 list, and no more device_writes should add lkb's to proc->locks list; so we
4209 shouldn't need to take asts_spin or locks_spin here. This assumes that
4210 device reads/writes/closes are serialized -- FIXME: we may need to serialize
4211 them ourselves. */
4212
4213void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
4214{
4215 struct dlm_lkb *lkb, *safe;
4216
4217 lock_recovery(ls);
David Teigland597d0ca2006-07-12 16:44:04 -05004218
David Teiglandef0c2bb2007-03-28 09:56:46 -05004219 while (1) {
4220 lkb = del_proc_lock(ls, proc);
4221 if (!lkb)
4222 break;
4223 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
David Teigland597d0ca2006-07-12 16:44:04 -05004224 orphan_proc_lock(ls, lkb);
David Teiglandef0c2bb2007-03-28 09:56:46 -05004225 else
David Teigland597d0ca2006-07-12 16:44:04 -05004226 unlock_proc_lock(ls, lkb);
David Teigland597d0ca2006-07-12 16:44:04 -05004227
4228 /* this removes the reference for the proc->locks list
4229 added by dlm_user_request, it may result in the lkb
4230 being freed */
4231
4232 dlm_put_lkb(lkb);
4233 }
David Teiglanda1bc86e2007-01-15 10:34:52 -06004234
David Teiglandef0c2bb2007-03-28 09:56:46 -05004235 mutex_lock(&ls->ls_clear_proc_locks);
4236
David Teiglanda1bc86e2007-01-15 10:34:52 -06004237 /* in-progress unlocks */
4238 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
4239 list_del_init(&lkb->lkb_ownqueue);
4240 lkb->lkb_flags |= DLM_IFL_DEAD;
4241 dlm_put_lkb(lkb);
4242 }
4243
4244 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
4245 list_del(&lkb->lkb_astqueue);
4246 dlm_put_lkb(lkb);
4247 }
4248
David Teigland597d0ca2006-07-12 16:44:04 -05004249 mutex_unlock(&ls->ls_clear_proc_locks);
4250 unlock_recovery(ls);
4251}
David Teiglanda1bc86e2007-01-15 10:34:52 -06004252