/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

static char junk_lvb[GDLM_LVB_SIZE];

/* convert dlm lock-mode to gfs lock-state */

static s16 gdlm_make_lmstate(s16 dlmmode)
{
	switch (dlmmode) {
	case DLM_LOCK_IV:
	case DLM_LOCK_NL:
		return LM_ST_UNLOCKED;
	case DLM_LOCK_EX:
		return LM_ST_EXCLUSIVE;
	case DLM_LOCK_CW:
		return LM_ST_DEFERRED;
	case DLM_LOCK_PR:
		return LM_ST_SHARED;
	}
	gdlm_assert(0, "unknown DLM mode %d", dlmmode);
	return -1;
}

/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->submit);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}

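/* Wake up a thread sleeping in wait_on_bit() on LFL_AST_WAIT for this
   lock (see hold_null_lock). */
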
static void wake_up_ast(struct gdlm_lock *lp)
{
	clear_bit(LFL_AST_WAIT, &lp->flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&lp->flags, LFL_AST_WAIT);
}

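/* Remove the lock from any delay queue, drop it from the lockspace lock
   count and free it. */
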
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}

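/* Locks on the delayed queue are held back while DFL_BLOCK_LOCKS is set;
   gdlm_submit_delayed() later moves them to the submit queue. */
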
static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}

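/* Handle the completion of a dlm request: decode the status in the lksb,
   deal with cancels, unlocks and recovery-time special cases, and report
   the final state back to GFS through ls->fscb(). */
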
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;

	memset(&acb, 0, sizeof(acb));

	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occurred.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		wake_up_ast(lp);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {

		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		wake_up_ast(lp);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;

	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}

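/* Completion AST passed to dlm_lock(); clears LFL_ACTIVE and processes
   the result. */
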
static void gdlm_ast(void *astarg)
{
	struct gdlm_lock *lp = astarg;
	clear_bit(LFL_ACTIVE, &lp->flags);
	process_complete(lp);
}

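/* Map a blocking AST mode onto the matching LM_CB_NEED_* callback and
   pass it up to GFS. */
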
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int cb = 0;

	switch (gdlm_make_lmstate(bast_mode)) {
	case LM_ST_EXCLUSIVE:
		cb = LM_CB_NEED_E;
		break;
	case LM_ST_DEFERRED:
		cb = LM_CB_NEED_D;
		break;
	case LM_ST_SHARED:
		cb = LM_CB_NEED_S;
		break;
	default:
		gdlm_assert(0, "unknown bast mode %u", bast_mode);
	}

	ls->fscb(ls->sdp, cb, &lp->lockname);
}

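/* Blocking AST passed to dlm_lock(); a conflicting request is waiting,
   so ask GFS to release or demote this lock. */
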
static void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;

	if (!mode) {
		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
			lp->lockname.ln_type,
			(unsigned long long)lp->lockname.ln_number);
		return;
	}

	process_blocking(lp, mode);
}

/* convert gfs lock-state to dlm lock-mode */

static s16 make_mode(s16 lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	gdlm_assert(0, "unknown LM state %d", lmstate);
	return -1;
}

/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	s16 cur = make_mode(cur_state);
	if (lp->cur != DLM_LOCK_IV)
		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}

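/* Build the DLM_LKF_* flags for a request from the GFS LM_FLAG_* flags,
   the requested mode and the state of the lock. */
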
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      s16 cur, s16 req)
{
	unsigned int lkf = 0;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	if (lp->lksb.sb_lkid != 0)
		lkf |= DLM_LKF_CONVERT;

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}

/* make_strname - convert GFS lock numbers to a string */

static inline void make_strname(const struct lm_lockname *lockname,
				struct gdlm_strname *str)
{
	sprintf(str->name, "%8x%16llx", lockname->ln_type,
		(unsigned long long)lockname->ln_number);
	str->namelen = GDLM_STRNAME_BYTES;
}

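/* Allocate and initialize a gdlm_lock for the given lock name. */
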
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
			  struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	make_strname(name, &lp->strname);
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}

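/* Create a lock for GFS (gdlm_get_lock) and free it again
   (gdlm_put_lock). */
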
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
		  void **lockp)
{
	struct gdlm_lock *lp;
	int error;

	error = gdlm_create_lp(lockspace, name, &lp);

	*lockp = lp;
	return error;
}

void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}

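/* Submit a lock request to the dlm, or queue it for later if the
   lockspace is blocked for recovery.  Returns LM_OUT_ASYNC when the result
   will arrive via gdlm_ast(), or LM_OUT_ERROR on failure. */
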
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests; they are
	 * submitted once recovery is done.  Requests for recovery (NOEXP)
	 * and unlocks can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
			 lp, bast ? gdlm_bast : NULL);

	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		gdlm_ast(lp);
		error = 0;
	}

	if (error) {
		log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}

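/* Submit an unlock request to the dlm; the completion is handled in
   process_complete() via the LFL_DLM_UNLOCK flag. */
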
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}

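/* Request a state change on a lock for GFS.  A request for LM_ST_UNLOCKED
   becomes an unlock; anything else is translated to a dlm mode and
   submitted through gdlm_do_lock(). */
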
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = lock;

	if (req_state == LM_ST_UNLOCKED)
		return gdlm_unlock(lock, cur_state);

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	return gdlm_do_lock(lp);
}

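/* Release a lock for GFS; a lock that was never granted (DLM_LOCK_IV)
   has nothing to unlock. */
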
unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	return gdlm_do_unlock(lp);
}

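/* Cancel an outstanding request for GFS.  A request still sitting on the
   delayed queue is completed as an internal cancel; a request already
   blocked in the dlm is cancelled with DLM_LKF_CANCEL. */
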
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		gdlm_ast(lp);
		return;
	}

	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}

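/* Attach and detach the lock value block (lvb) buffer for a lock. */
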
static int gdlm_add_lvb(struct gdlm_lock *lp)
{
	char *lvb;

	lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
	if (!lvb)
		return -ENOMEM;

	lp->lksb.sb_lvbptr = lvb;
	lp->lvb = lvb;
	return 0;
}

static void gdlm_del_lvb(struct gdlm_lock *lp)
{
	kfree(lp->lvb);
	lp->lvb = NULL;
	lp->lksb.sb_lvbptr = NULL;
}

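/* Bit-wait action used with wait_on_bit() in hold_null_lock(); just
   reschedules until LFL_AST_WAIT is cleared by wake_up_ast(). */
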
static int gdlm_ast_wait(void *word)
{
	schedule();
	return 0;
}

/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);
	set_bit(LFL_AST_WAIT, &lpn->flags);

	gdlm_do_lock(lpn);
	wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait,
		    TASK_UNINTERRUPTIBLE);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}

/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs may call unhold_lvb() during a callback (from
   the context of a lock_dlm thread) which could cause a deadlock since the
   other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}

/* Acquire a NL lock because gfs requires the value block to remain
   intact on the resource while the lvb is "held" even if it's holding no locks
   on the resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
	struct gdlm_lock *lp = lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}

void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}

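/* Move all delayed locks to the submit queue and wake the lock_dlm
   thread to resubmit them. */
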
void gdlm_submit_delayed(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
		list_del_init(&lp->delay_list);
		list_add_tail(&lp->delay_list, &ls->submit);
	}
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}