/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>

#include "incore.h"
#include "glock.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

extern struct workqueue_struct *gfs2_control_wq;

/**
 * gfs2_update_stats - Update time based stats
 * @s: The lock statistics to update
 * @index: The index within @s of the smoothed mean to update
 * @sample: New data to include
 *
 * @delta is the difference between the current rtt sample and the
 * running average srtt. We add 1/8 of that to the srtt in order to
 * update the current srtt estimate. The variance estimate is a bit
 * more complicated. We subtract the current variance estimate from
 * the abs value of the @delta and add 1/4 of that to the running
 * total.
 *
 * Note that the index points at the array entry containing the smoothed
 * mean value, and the variance is always in the following entry
 *
 * Reference: TCP/IP Illustrated, vol 2, pp. 831-832
 * All times are in units of integer nanoseconds. Unlike the TCP/IP case,
 * they are not scaled fixed point.
 */

static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += ((abs64(delta) - s->stats[index]) >> 2);
}

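/*
 * Editor's note: a worked example of the update above, with hypothetical
 * values.  If the current smoothed rtt s->stats[index] is 1000ns and a
 * new sample of 1800ns arrives, then delta = 800ns and the smoothed mean
 * becomes 1000 + (800 >> 3) = 1100ns.  If the variance estimate in the
 * following entry is 200ns, it becomes 200 + ((|800| - 200) >> 2) = 350ns.
 */
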
/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, where the
 * requested state is null (or unlocked), or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request time) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}

static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	set_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (!test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		printk(KERN_ERR "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	printk(KERN_ERR "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (gl->gl_lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;
		if (test_bit(GLF_BLOCKING, &gl->gl_flags))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}

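/*
 * Editor's note: a hypothetical request illustrating the LM_FLAG_ANY
 * case above.  Asking for LM_ST_SHARED (DLM_LOCK_PR) with LM_FLAG_ANY
 * sets DLM_LKF_ALTCW, so the dlm may grant DLM_LOCK_CW instead and
 * report that via DLM_SBF_ALTMODE; gdlm_ast() then maps the granted
 * mode back to LM_ST_DEFERRED.  LM_ST_DEFERRED requests are handled
 * symmetrically via DLM_LKF_ALTPR.
 */
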
static void gfs2_reverse_hex(char *c, u64 value)
{
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}

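/*
 * Editor's note: a hypothetical example of how gdlm_lock() below uses
 * this helper.  Digits are written right-to-left ending at the given
 * position, so a glock of type 2 and number 0x1234 yields the
 * 24-character, space-padded dlm resource name
 * "       2            1234" (type ends at offset 7, number at 23).
 */
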
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	int req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";

	req = make_mode(req_state);
	lkf = make_flags(gl, flags, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	if (gl->gl_lksb.sb_lkid) {
		gfs2_update_request_times(gl);
	} else {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	}
	/*
	 * Submit the actual lock request.
	 */

	return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
}

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (gl->gl_lksb.sb_lkid == 0) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to skip dlm_unlock writing the lvb when lock is ex */
	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    gl->gl_lksb.sb_lvbptr && (gl->gl_state != LM_ST_EXCLUSIVE)) {
		gfs2_glock_free(gl);
		return;
	}

	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error) {
		printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
		return;
	}
}

static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 * 1. dlm_controld sees lockspace members change
 * 2. dlm_controld blocks dlm-kernel locking activity
 * 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 * 4. dlm_controld starts and finishes its own user level recovery
 * 5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 * 6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 * 7. dlm_recoverd does its own lock recovery
 * 8. dlm_recoverd unblocks dlm-kernel locking activity
 * 9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure.  gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15.  This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9).  This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start.  So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set.  (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 * 3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 * 6. recover_slot records any failed jids (maybe none)
 * 9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again); if so, do nothing, otherwise if recover_start > recover_block
 *     then clear BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs.  The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning.  The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs.  (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.)  This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 *    Each node holds the mounted_lock in PR while it's mounted.
 *    Each node tries to acquire the mounted_lock in EX when it mounts.
 *    If a node is granted the mounted_lock EX it means there are no
 *    other mounted nodes (no PR locks exist), and it is the first mounter.
 *    The mounted_lock is demoted to PR when first recovery is done, so
 *    others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 *    mounter is doing first mount recovery of all journals.
 *    A mounting node needs to acquire control_lock in EX mode before
 *    it can proceed.  The first mounter holds control_lock in EX while doing
 *    the first mount recovery, blocking mounts from other nodes, then demotes
 *    control_lock to NL when it's done (others_may_mount/first_done),
 *    allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks.  It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback.  Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */

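/*
 * Editor's sketch of the lvb layout described above, with hypothetical
 * values: generation 5 with jids 0 and 2 needing recovery would be
 *
 *   bytes 0-3:  05 00 00 00   (le32 generation number)
 *   bytes 4-7:  00 00 00 00   (unused)
 *   byte  8:    05            (bits 0 and 2 set in little-endian bitmap)
 *   bytes 9..GDLM_LVB_SIZE-1: 00
 */
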
static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	uint32_t gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(uint32_t));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	uint32_t gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(uint32_t));
}

static int all_jid_bits_clear(char *lvb)
{
	int i;
	for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) {
		if (lvb[i])
			return 0;
	}
	return 1;
}

static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

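/*
 * Editor's note: the "%8x%16x" name built by sync_lock() produces the
 * same 24-character, space-padded type/number layout that gdlm_lock()
 * assembles by hand with gfs2_reverse_hex(), so these nondisk locks
 * follow the dlm resource name convention used for ordinary glocks.
 */
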
static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char lvb_bits[GDLM_LVB_SIZE];
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession.  Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery.  So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char lvb_bits[GDLM_LVB_SIZE];
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		fs_info(sdp, "control_mount wait1 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int dlm_recovery_wait(void *word)
{
	schedule();
	return 0;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char lvb_bits[GDLM_LVB_SIZE];
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery.  We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    dlm_recovery_wait, TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(lvb_bits, 0, sizeof(lvb_bits));
	control_lvb_write(ls, start_gen, lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}

/*
 * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
 * to accommodate the largest slot number.  (NB dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

#define RECOVER_SIZE_INC 16

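/*
 * Editor's note: a hypothetical sizing example.  If dlm reports slots
 * {1, 2, 5}, they map to jids {0, 1, 4}, so max_jid = 4 and the arrays
 * below must hold max_jid + 1 = 5 entries; an old size of 0 would be
 * grown to RECOVER_SIZE_INC = 16.
 */
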
static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;

	if (old_size >= max_jid + 1)
		return 0;

	new_size = old_size + RECOVER_SIZE_INC;

	submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
	result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of our own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */

	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_FS | DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};