blob: cdeafb4e7ed60d496200cd8a78806c874e9d8c62 [file] [log] [blame]
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmconvert.c
 *
 * underlying calls for lock conversion
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */
26
27
28#include <linux/module.h>
29#include <linux/fs.h>
30#include <linux/types.h>
Kurt Hackel6714d8e2005-12-15 14:31:23 -080031#include <linux/highmem.h>
Kurt Hackel6714d8e2005-12-15 14:31:23 -080032#include <linux/init.h>
33#include <linux/sysctl.h>
34#include <linux/random.h>
35#include <linux/blkdev.h>
36#include <linux/socket.h>
37#include <linux/inet.h>
38#include <linux/spinlock.h>
39
40
41#include "cluster/heartbeat.h"
42#include "cluster/nodemanager.h"
43#include "cluster/tcp.h"
44
45#include "dlmapi.h"
46#include "dlmcommon.h"
47
48#include "dlmconvert.h"
49
50#define MLOG_MASK_PREFIX ML_DLM
51#include "cluster/masklog.h"
52
53/* NOTE: __dlmconvert_master is the only function in here that
54 * needs a spinlock held on entry (res->spinlock) and it is the
55 * only one that holds a lock on exit (res->spinlock).
56 * All other functions in here need no locks and drop all of
57 * the locks that they acquire. */
58static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
59 struct dlm_lock_resource *res,
60 struct dlm_lock *lock, int flags,
61 int type, int *call_ast,
62 int *kick_thread);
63static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
64 struct dlm_lock_resource *res,
65 struct dlm_lock *lock, int flags, int type);
66
67/*
68 * this is only called directly by dlmlock(), and only when the
69 * local node is the owner of the lockres
70 * locking:
71 * caller needs: none
72 * taken: takes and drops res->spinlock
73 * held on exit: none
74 * returns: see __dlmconvert_master
75 */
76enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm,
77 struct dlm_lock_resource *res,
78 struct dlm_lock *lock, int flags, int type)
79{
80 int call_ast = 0, kick_thread = 0;
81 enum dlm_status status;
82
83 spin_lock(&res->spinlock);
84 /* we are not in a network handler, this is fine */
85 __dlm_wait_on_lockres(res);
86 __dlm_lockres_reserve_ast(res);
87 res->state |= DLM_LOCK_RES_IN_PROGRESS;
88
89 status = __dlmconvert_master(dlm, res, lock, flags, type,
90 &call_ast, &kick_thread);
91
92 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
93 spin_unlock(&res->spinlock);
94 wake_up(&res->wq);
95 if (status != DLM_NORMAL && status != DLM_NOTQUEUED)
96 dlm_error(status);
97
98 /* either queue the ast or release it */
99 if (call_ast)
100 dlm_queue_ast(dlm, lock);
101 else
102 dlm_lockres_release_ast(dlm, res);
103
104 if (kick_thread)
105 dlm_kick_thread(dlm, res);
106
107 return status;
108}
109
110/* performs lock conversion at the lockres master site
111 * locking:
112 * caller needs: res->spinlock
113 * taken: takes and drops lock->spinlock
114 * held on exit: res->spinlock
115 * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED
116 * call_ast: whether ast should be called for this lock
117 * kick_thread: whether dlm_kick_thread should be called
118 */
119static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm,
120 struct dlm_lock_resource *res,
121 struct dlm_lock *lock, int flags,
122 int type, int *call_ast,
123 int *kick_thread)
124{
125 enum dlm_status status = DLM_NORMAL;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800126 struct dlm_lock *tmplock=NULL;
127
128 assert_spin_locked(&res->spinlock);
129
Tao Maef6b6892011-02-21 11:10:44 +0800130 mlog(0, "type=%d, convert_type=%d, new convert_type=%d\n",
131 lock->ml.type, lock->ml.convert_type, type);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800132
133 spin_lock(&lock->spinlock);
134
135 /* already converting? */
136 if (lock->ml.convert_type != LKM_IVMODE) {
137 mlog(ML_ERROR, "attempted to convert a lock with a lock "
138 "conversion pending\n");
139 status = DLM_DENIED;
140 goto unlock_exit;
141 }
142
143 /* must be on grant queue to convert */
144 if (!dlm_lock_on_list(&res->granted, lock)) {
145 mlog(ML_ERROR, "attempted to convert a lock not on grant "
146 "queue\n");
147 status = DLM_DENIED;
148 goto unlock_exit;
149 }
150
151 if (flags & LKM_VALBLK) {
152 switch (lock->ml.type) {
153 case LKM_EXMODE:
154 /* EX + LKM_VALBLK + convert == set lvb */
155 mlog(0, "will set lvb: converting %s->%s\n",
156 dlm_lock_mode_name(lock->ml.type),
157 dlm_lock_mode_name(type));
158 lock->lksb->flags |= DLM_LKSB_PUT_LVB;
159 break;
160 case LKM_PRMODE:
161 case LKM_NLMODE:
162 /* refetch if new level is not NL */
163 if (type > LKM_NLMODE) {
164 mlog(0, "will fetch new value into "
165 "lvb: converting %s->%s\n",
166 dlm_lock_mode_name(lock->ml.type),
167 dlm_lock_mode_name(type));
168 lock->lksb->flags |= DLM_LKSB_GET_LVB;
169 } else {
170 mlog(0, "will NOT fetch new value "
171 "into lvb: converting %s->%s\n",
172 dlm_lock_mode_name(lock->ml.type),
173 dlm_lock_mode_name(type));
174 flags &= ~(LKM_VALBLK);
175 }
176 break;
177 }
178 }
179
180
181 /* in-place downconvert? */
182 if (type <= lock->ml.type)
183 goto grant;
184
185 /* upconvert from here on */
186 status = DLM_NORMAL;
Dong Fangdf53cd32013-09-11 14:19:50 -0700187 list_for_each_entry(tmplock, &res->granted, list) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800188 if (tmplock == lock)
189 continue;
190 if (!dlm_lock_compatible(tmplock->ml.type, type))
191 goto switch_queues;
192 }
193
Dong Fangdf53cd32013-09-11 14:19:50 -0700194 list_for_each_entry(tmplock, &res->converting, list) {
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800195 if (!dlm_lock_compatible(tmplock->ml.type, type))
196 goto switch_queues;
197 /* existing conversion requests take precedence */
198 if (!dlm_lock_compatible(tmplock->ml.convert_type, type))
199 goto switch_queues;
200 }
201
202 /* fall thru to grant */
203
204grant:
205 mlog(0, "res %.*s, granting %s lock\n", res->lockname.len,
206 res->lockname.name, dlm_lock_mode_name(type));
207 /* immediately grant the new lock type */
208 lock->lksb->status = DLM_NORMAL;
209 if (lock->ml.node == dlm->node_num)
210 mlog(0, "doing in-place convert for nonlocal lock\n");
211 lock->ml.type = type;
Mark Fashehc0a85202006-04-27 19:07:45 -0700212 if (lock->lksb->flags & DLM_LKSB_PUT_LVB)
213 memcpy(res->lvb, lock->lksb->lvb, DLM_LVB_LEN);
214
xuejiufeie5054c92016-03-25 14:21:38 -0700215 /*
216 * Move the lock to the tail because it may be the only lock which has
217 * an invalid lvb.
218 */
219 list_move_tail(&lock->list, &res->granted);
220
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800221 status = DLM_NORMAL;
222 *call_ast = 1;
223 goto unlock_exit;
224
225switch_queues:
226 if (flags & LKM_NOQUEUE) {
227 mlog(0, "failed to convert NOQUEUE lock %.*s from "
228 "%d to %d...\n", res->lockname.len, res->lockname.name,
229 lock->ml.type, type);
230 status = DLM_NOTQUEUED;
231 goto unlock_exit;
232 }
233 mlog(0, "res %.*s, queueing...\n", res->lockname.len,
234 res->lockname.name);
235
236 lock->ml.convert_type = type;
237 /* do not alter lock refcount. switching lists. */
Akinobu Mitaf1166292006-06-26 00:24:46 -0700238 list_move_tail(&lock->list, &res->converting);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800239
240unlock_exit:
241 spin_unlock(&lock->spinlock);
242 if (status == DLM_DENIED) {
243 __dlm_print_one_lock_resource(res);
244 }
245 if (status == DLM_NORMAL)
246 *kick_thread = 1;
247 return status;
248}
249
250void dlm_revert_pending_convert(struct dlm_lock_resource *res,
251 struct dlm_lock *lock)
252{
253 /* do not alter lock refcount. switching lists. */
Akinobu Mitaf1166292006-06-26 00:24:46 -0700254 list_move_tail(&lock->list, &res->granted);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800255 lock->ml.convert_type = LKM_IVMODE;
256 lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
257}
258
259/* messages the master site to do lock conversion
260 * locking:
261 * caller needs: none
262 * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS
263 * held on exit: none
264 * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node
265 */
266enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
267 struct dlm_lock_resource *res,
268 struct dlm_lock *lock, int flags, int type)
269{
270 enum dlm_status status;
Joseph Qiac7cf242016-03-25 14:21:26 -0700271 u8 old_owner = res->owner;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800272
273 mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
274 lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
275
276 spin_lock(&res->spinlock);
277 if (res->state & DLM_LOCK_RES_RECOVERING) {
278 mlog(0, "bailing out early since res is RECOVERING "
279 "on secondary queue\n");
280 /* __dlm_print_one_lock_resource(res); */
281 status = DLM_RECOVERING;
282 goto bail;
283 }
284 /* will exit this call with spinlock held */
285 __dlm_wait_on_lockres(res);
286
287 if (lock->ml.convert_type != LKM_IVMODE) {
288 __dlm_print_one_lock_resource(res);
289 mlog(ML_ERROR, "converting a remote lock that is already "
Kurt Hackel29004852006-03-02 16:43:36 -0800290 "converting! (cookie=%u:%llu, conv=%d)\n",
Kurt Hackel74aa2582007-01-17 15:11:36 -0800291 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
292 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
Kurt Hackel29004852006-03-02 16:43:36 -0800293 lock->ml.convert_type);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800294 status = DLM_DENIED;
295 goto bail;
296 }
Joseph Qibe12b292016-03-25 14:21:29 -0700297
298 if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
299 mlog(0, "last convert request returned DLM_RECOVERING, but "
300 "owner has already queued and sent ast to me. res %.*s, "
301 "(cookie=%u:%llu, type=%d, conv=%d)\n",
302 res->lockname.len, res->lockname.name,
303 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
304 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
305 lock->ml.type, lock->ml.convert_type);
306 status = DLM_NORMAL;
307 goto bail;
308 }
309
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800310 res->state |= DLM_LOCK_RES_IN_PROGRESS;
311 /* move lock to local convert queue */
312 /* do not alter lock refcount. switching lists. */
Akinobu Mitaf1166292006-06-26 00:24:46 -0700313 list_move_tail(&lock->list, &res->converting);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800314 lock->convert_pending = 1;
315 lock->ml.convert_type = type;
316
317 if (flags & LKM_VALBLK) {
318 if (lock->ml.type == LKM_EXMODE) {
319 flags |= LKM_PUT_LVB;
320 lock->lksb->flags |= DLM_LKSB_PUT_LVB;
321 } else {
322 if (lock->ml.convert_type == LKM_NLMODE)
323 flags &= ~LKM_VALBLK;
324 else {
325 flags |= LKM_GET_LVB;
326 lock->lksb->flags |= DLM_LKSB_GET_LVB;
327 }
328 }
329 }
330 spin_unlock(&res->spinlock);
331
332 /* no locks held here.
333 * need to wait for a reply as to whether it got queued or not. */
334 status = dlm_send_remote_convert_request(dlm, res, lock, flags, type);
335
336 spin_lock(&res->spinlock);
337 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
338 lock->convert_pending = 0;
Joseph Qiac7cf242016-03-25 14:21:26 -0700339 /* if it failed, move it back to granted queue.
340 * if master returns DLM_NORMAL and then down before sending ast,
341 * it may have already been moved to granted queue, reset to
342 * DLM_RECOVERING and retry convert */
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800343 if (status != DLM_NORMAL) {
344 if (status != DLM_NOTQUEUED)
345 dlm_error(status);
346 dlm_revert_pending_convert(res, lock);
Joseph Qiac7cf242016-03-25 14:21:26 -0700347 } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
348 (old_owner != res->owner)) {
349 mlog(0, "res %.*s is in recovering or has been recovered.\n",
350 res->lockname.len, res->lockname.name);
351 status = DLM_RECOVERING;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800352 }
353bail:
354 spin_unlock(&res->spinlock);
355
356 /* TODO: should this be a wake_one? */
357 /* wake up any IN_PROGRESS waiters */
358 wake_up(&res->wq);
359
360 return status;
361}
362
363/* sends DLM_CONVERT_LOCK_MSG to master site
364 * locking:
365 * caller needs: none
366 * taken: none
367 * held on exit: none
368 * returns: DLM_NOLOCKMGR, status from remote node
369 */
370static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
371 struct dlm_lock_resource *res,
372 struct dlm_lock *lock, int flags, int type)
373{
374 struct dlm_convert_lock convert;
375 int tmpret;
376 enum dlm_status ret;
377 int status = 0;
378 struct kvec vec[2];
379 size_t veclen = 1;
380
Tao Maef6b6892011-02-21 11:10:44 +0800381 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800382
383 memset(&convert, 0, sizeof(struct dlm_convert_lock));
384 convert.node_idx = dlm->node_num;
385 convert.requested_type = type;
386 convert.cookie = lock->ml.cookie;
387 convert.namelen = res->lockname.len;
388 convert.flags = cpu_to_be32(flags);
389 memcpy(convert.name, res->lockname.name, convert.namelen);
390
391 vec[0].iov_len = sizeof(struct dlm_convert_lock);
392 vec[0].iov_base = &convert;
393
394 if (flags & LKM_PUT_LVB) {
395 /* extra data to send if we are updating lvb */
396 vec[1].iov_len = DLM_LVB_LEN;
397 vec[1].iov_base = lock->lksb->lvb;
398 veclen++;
399 }
400
401 tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key,
402 vec, veclen, res->owner, &status);
403 if (tmpret >= 0) {
404 // successfully sent and received
405 ret = status; // this is already a dlm_status
406 if (ret == DLM_RECOVERING) {
407 mlog(0, "node %u returned DLM_RECOVERING from convert "
408 "message!\n", res->owner);
409 } else if (ret == DLM_MIGRATING) {
410 mlog(0, "node %u returned DLM_MIGRATING from convert "
411 "message!\n", res->owner);
412 } else if (ret == DLM_FORWARD) {
413 mlog(0, "node %u returned DLM_FORWARD from convert "
414 "message!\n", res->owner);
415 } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED)
416 dlm_error(ret);
417 } else {
Wengang Wanga5196ec2010-03-30 12:09:22 +0800418 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
419 "node %u\n", tmpret, DLM_CONVERT_LOCK_MSG, dlm->key,
420 res->owner);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800421 if (dlm_is_host_down(tmpret)) {
Kurt Hackel44465a72006-01-18 17:05:38 -0800422 /* instead of logging the same network error over
423 * and over, sleep here and wait for the heartbeat
424 * to notice the node is dead. times out after 5s. */
Sunil Mushran2bd63212010-01-25 16:57:38 -0800425 dlm_wait_for_node_death(dlm, res->owner,
Kurt Hackel44465a72006-01-18 17:05:38 -0800426 DLM_NODE_DEATH_WAIT_MAX);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800427 ret = DLM_RECOVERING;
428 mlog(0, "node %u died so returning DLM_RECOVERING "
429 "from convert message!\n", res->owner);
430 } else {
431 ret = dlm_err_to_dlm_status(tmpret);
432 }
433 }
434
435 return ret;
436}
437
438/* handler for DLM_CONVERT_LOCK_MSG on master site
439 * locking:
440 * caller needs: none
441 * taken: takes and drop res->spinlock
442 * held on exit: none
443 * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS,
444 * status from __dlmconvert_master
445 */
Kurt Hackeld74c9802007-01-17 17:04:25 -0800446int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
447 void **ret_data)
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800448{
449 struct dlm_ctxt *dlm = data;
450 struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf;
451 struct dlm_lock_resource *res = NULL;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800452 struct dlm_lock *lock = NULL;
Dong Fangdf53cd32013-09-11 14:19:50 -0700453 struct dlm_lock *tmp_lock;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800454 struct dlm_lockstatus *lksb;
455 enum dlm_status status = DLM_NORMAL;
456 u32 flags;
Kurt Hackela6fa3642007-01-17 14:59:12 -0800457 int call_ast = 0, kick_thread = 0, ast_reserved = 0, wake = 0;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800458
459 if (!dlm_grab(dlm)) {
460 dlm_error(DLM_REJECTED);
461 return DLM_REJECTED;
462 }
463
464 mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
465 "Domain %s not fully joined!\n", dlm->name);
466
467 if (cnv->namelen > DLM_LOCKID_NAME_MAX) {
468 status = DLM_IVBUFLEN;
469 dlm_error(status);
470 goto leave;
471 }
472
473 flags = be32_to_cpu(cnv->flags);
474
475 if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
476 (LKM_PUT_LVB|LKM_GET_LVB)) {
477 mlog(ML_ERROR, "both PUT and GET lvb specified\n");
478 status = DLM_BADARGS;
479 goto leave;
480 }
481
482 mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
483 (flags & LKM_GET_LVB ? "get lvb" : "none"));
484
485 status = DLM_IVLOCKID;
486 res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen);
487 if (!res) {
488 dlm_error(status);
489 goto leave;
490 }
491
492 spin_lock(&res->spinlock);
Kurt Hackelb2205322006-05-01 14:29:28 -0700493 status = __dlm_lockres_state_to_status(res);
494 if (status != DLM_NORMAL) {
495 spin_unlock(&res->spinlock);
496 dlm_error(status);
497 goto leave;
498 }
Dong Fangdf53cd32013-09-11 14:19:50 -0700499 list_for_each_entry(tmp_lock, &res->granted, list) {
500 if (tmp_lock->ml.cookie == cnv->cookie &&
501 tmp_lock->ml.node == cnv->node_idx) {
502 lock = tmp_lock;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800503 dlm_lock_get(lock);
504 break;
505 }
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800506 }
507 spin_unlock(&res->spinlock);
508 if (!lock) {
509 status = DLM_IVLOCKID;
Kurt Hackel90aaaf1c2007-01-17 15:01:45 -0800510 mlog(ML_ERROR, "did not find lock to convert on grant queue! "
511 "cookie=%u:%llu\n",
Kurt Hackel74aa2582007-01-17 15:11:36 -0800512 dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
513 dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
Tao Ma2af37ce2008-02-28 10:41:55 +0800514 dlm_print_one_lock_resource(res);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800515 goto leave;
516 }
517
518 /* found the lock */
519 lksb = lock->lksb;
520
521 /* see if caller needed to get/put lvb */
522 if (flags & LKM_PUT_LVB) {
523 BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
524 lksb->flags |= DLM_LKSB_PUT_LVB;
525 memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN);
526 } else if (flags & LKM_GET_LVB) {
527 BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
528 lksb->flags |= DLM_LKSB_GET_LVB;
529 }
530
531 spin_lock(&res->spinlock);
532 status = __dlm_lockres_state_to_status(res);
533 if (status == DLM_NORMAL) {
534 __dlm_lockres_reserve_ast(res);
Kurt Hackele2b5e452006-01-18 17:02:56 -0800535 ast_reserved = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800536 res->state |= DLM_LOCK_RES_IN_PROGRESS;
537 status = __dlmconvert_master(dlm, res, lock, flags,
538 cnv->requested_type,
539 &call_ast, &kick_thread);
540 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
Kurt Hackela6fa3642007-01-17 14:59:12 -0800541 wake = 1;
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800542 }
543 spin_unlock(&res->spinlock);
Kurt Hackela6fa3642007-01-17 14:59:12 -0800544 if (wake)
545 wake_up(&res->wq);
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800546
547 if (status != DLM_NORMAL) {
548 if (status != DLM_NOTQUEUED)
549 dlm_error(status);
550 lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB);
551 }
552
553leave:
Kurt Hackel90aaaf1c2007-01-17 15:01:45 -0800554 if (lock)
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800555 dlm_lock_put(lock);
556
Kurt Hackele2b5e452006-01-18 17:02:56 -0800557 /* either queue the ast or release it, if reserved */
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800558 if (call_ast)
559 dlm_queue_ast(dlm, lock);
Kurt Hackele2b5e452006-01-18 17:02:56 -0800560 else if (ast_reserved)
Kurt Hackel6714d8e2005-12-15 14:31:23 -0800561 dlm_lockres_release_ast(dlm, res);
562
563 if (kick_thread)
564 dlm_kick_thread(dlm, res);
565
566 if (res)
567 dlm_lockres_put(res);
568
569 dlm_put(dlm);
570
571 return status;
572}