1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/sort.h>
17#include <linux/jhash.h>
18#include <linux/kref.h>
19#include <asm/semaphore.h>
20#include <asm/uaccess.h>
21
22#include "gfs2.h"
23#include "glock.h"
24#include "glops.h"
25#include "inode.h"
26#include "lm.h"
27#include "lops.h"
28#include "meta_io.h"
29#include "quota.h"
30#include "super.h"
31
32/* Must be kept in sync with the beginning of struct gfs2_glock */
33struct glock_plug {
34 struct list_head gl_list;
35 unsigned long gl_flags;
36};
37
38struct greedy {
39 struct gfs2_holder gr_gh;
40 struct work_struct gr_work;
41};
42
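/* The glock_examiner callback is invoked by examine_bucket() on each
   glock in a hash bucket; the walker takes an extra reference on the
   glock first and the examiner must drop it with gfs2_glock_put(). */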
43typedef void (*glock_examiner) (struct gfs2_glock * gl);
44
45/**
46 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
47 * @actual: the current state of the lock
48 * @requested: the lock state that was requested by the caller
49 * @flags: the modifier flags passed in by the caller
50 *
51 * Returns: 1 if the locks are compatible, 0 otherwise
52 */
53
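/* Unless GL_EXACT is set, a request for LM_ST_SHARED is satisfied by an
   existing LM_ST_EXCLUSIVE lock, and a request carrying LM_FLAG_ANY is
   satisfied by any state other than LM_ST_UNLOCKED. */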
54static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
55 int flags)
56{
57 if (actual == requested)
58 return 1;
59
60 if (flags & GL_EXACT)
61 return 0;
62
63 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
64 return 1;
65
66 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
67 return 1;
68
69 return 0;
70}
71
72/**
 73 * gl_hash() - Turn a lock name into a hash bucket number
 74 * @name: The lock name
75 *
76 * Returns: The number of the corresponding hash bucket
77 */
78
79static unsigned int gl_hash(struct lm_lockname *name)
80{
81 unsigned int h;
82
83 h = jhash(&name->ln_number, sizeof(uint64_t), 0);
84 h = jhash(&name->ln_type, sizeof(unsigned int), h);
85 h &= GFS2_GL_HASH_MASK;
86
87 return h;
88}
89
90/**
91 * glock_free() - Perform a few checks and then release struct gfs2_glock
92 * @gl: The glock to release
93 *
94 * Also calls lock module to release its internal structure for this glock.
95 *
96 */
97
98static void glock_free(struct gfs2_glock *gl)
99{
100 struct gfs2_sbd *sdp = gl->gl_sbd;
101 struct inode *aspace = gl->gl_aspace;
102
103 gfs2_lm_put_lock(sdp, gl->gl_lock);
104
105 if (aspace)
106 gfs2_aspace_put(aspace);
107
108 kmem_cache_free(gfs2_glock_cachep, gl);
109}
110
111/**
112 * gfs2_glock_hold() - increment reference count on glock
113 * @gl: The glock to hold
114 *
115 */
116
117void gfs2_glock_hold(struct gfs2_glock *gl)
118{
119 kref_get(&gl->gl_ref);
120}
121
122/* All work is done after the return from kref_put() so we
123 can release the write_lock before the free. */
124
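/* kref release callback: only sanity-checks that the glock is idle
   (unlocked, off the reclaim list, no holders or waiters).  The hash list
   removal and glock_free() are done by the caller, gfs2_glock_put(),
   which is still holding the bucket's write lock at this point. */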
125static void kill_glock(struct kref *kref)
126{
127 struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
128 struct gfs2_sbd *sdp = gl->gl_sbd;
129
130 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
131 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
132 gfs2_assert(sdp, list_empty(&gl->gl_holders));
133 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
134 gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
135 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
136}
137
138/**
139 * gfs2_glock_put() - Decrement reference count on glock
140 * @gl: The glock to put
141 *
142 */
143
144int gfs2_glock_put(struct gfs2_glock *gl)
145{
146 struct gfs2_sbd *sdp = gl->gl_sbd;
147 struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
148 int rv = 0;
149
150 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
151
152 write_lock(&bucket->hb_lock);
153 if (kref_put(&gl->gl_ref, kill_glock)) {
154 list_del_init(&gl->gl_list);
155 write_unlock(&bucket->hb_lock);
156 glock_free(gl);
157 rv = 1;
158 goto out;
159 }
160 write_unlock(&bucket->hb_lock);
161 out:
162 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
163 return rv;
164}
165
166/**
167 * queue_empty - check to see if a glock's queue is empty
168 * @gl: the glock
169 * @head: the head of the queue to check
170 *
171 * This function protects the list in the event that a process already
172 * has a holder on the list and is adding a second holder for itself.
173 * The glmutex lock is what generally prevents processes from working
174 * on the same glock at once, but the special case of adding a second
175 * holder for yourself ("recursive" locking) doesn't involve locking
176 * glmutex, making the spin lock necessary.
177 *
178 * Returns: 1 if the queue is empty
179 */
180
181static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
182{
183 int empty;
184 spin_lock(&gl->gl_spin);
185 empty = list_empty(head);
186 spin_unlock(&gl->gl_spin);
187 return empty;
188}
189
190/**
191 * search_bucket() - Find struct gfs2_glock by lock number
192 * @bucket: the bucket to search
193 * @name: The lock name
194 *
195 * Returns: NULL, or the struct gfs2_glock with the requested number
196 */
197
198static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
199 struct lm_lockname *name)
200{
201 struct gfs2_glock *gl;
202
203 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
204 if (test_bit(GLF_PLUG, &gl->gl_flags))
205 continue;
206 if (!lm_name_equal(&gl->gl_name, name))
207 continue;
208
209 kref_get(&gl->gl_ref);
210
211 return gl;
212 }
213
214 return NULL;
215}
216
217/**
218 * gfs2_glock_find() - Find glock by lock number
219 * @sdp: The GFS2 superblock
220 * @name: The lock name
221 *
222 * Returns: NULL, or the struct gfs2_glock with the requested number
223 */
224
225struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
226 struct lm_lockname *name)
227{
228 struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
229 struct gfs2_glock *gl;
230
231 read_lock(&bucket->hb_lock);
232 gl = search_bucket(bucket, name);
233 read_unlock(&bucket->hb_lock);
234
235 return gl;
236}
237
238/**
239 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
240 * @sdp: The GFS2 superblock
241 * @number: the lock number
242 * @glops: The glock_operations to use
243 * @create: If 0, don't create the glock if it doesn't exist
244 * @glp: the glock is returned here
245 *
246 * This does not lock a glock, just finds/creates structures for one.
247 *
248 * Returns: errno
249 */
250
251int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
252 struct gfs2_glock_operations *glops, int create,
253 struct gfs2_glock **glp)
254{
255 struct lm_lockname name;
256 struct gfs2_glock *gl, *tmp;
257 struct gfs2_gl_hash_bucket *bucket;
258 int error;
259
260 name.ln_number = number;
261 name.ln_type = glops->go_type;
262 bucket = &sdp->sd_gl_hash[gl_hash(&name)];
263
264 read_lock(&bucket->hb_lock);
265 gl = search_bucket(bucket, &name);
266 read_unlock(&bucket->hb_lock);
267
268 if (gl || !create) {
269 *glp = gl;
270 return 0;
271 }
272
273 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
274 if (!gl)
275 return -ENOMEM;
276
277 memset(gl, 0, sizeof(struct gfs2_glock));
278
279 INIT_LIST_HEAD(&gl->gl_list);
280 gl->gl_name = name;
281 kref_init(&gl->gl_ref);
282
283 spin_lock_init(&gl->gl_spin);
284
285 gl->gl_state = LM_ST_UNLOCKED;
286 INIT_LIST_HEAD(&gl->gl_holders);
287 INIT_LIST_HEAD(&gl->gl_waiters1);
288 INIT_LIST_HEAD(&gl->gl_waiters2);
289 INIT_LIST_HEAD(&gl->gl_waiters3);
290
291 gl->gl_ops = glops;
292
293 gl->gl_bucket = bucket;
294 INIT_LIST_HEAD(&gl->gl_reclaim);
295
296 gl->gl_sbd = sdp;
297
298 lops_init_le(&gl->gl_le, &gfs2_glock_lops);
299 INIT_LIST_HEAD(&gl->gl_ail_list);
300
301 /* If this glock protects actual on-disk data or metadata blocks,
302 create a VFS inode to manage the pages/buffers holding them. */
303 if (glops == &gfs2_inode_glops ||
304 glops == &gfs2_rgrp_glops ||
305 glops == &gfs2_meta_glops) {
306 gl->gl_aspace = gfs2_aspace_get(sdp);
307 if (!gl->gl_aspace) {
308 error = -ENOMEM;
309 goto fail;
310 }
311 }
312
313 error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
314 if (error)
315 goto fail_aspace;
316
317 write_lock(&bucket->hb_lock);
318 tmp = search_bucket(bucket, &name);
319 if (tmp) {
320 write_unlock(&bucket->hb_lock);
321 glock_free(gl);
322 gl = tmp;
323 } else {
324 list_add_tail(&gl->gl_list, &bucket->hb_list);
325 write_unlock(&bucket->hb_lock);
326 }
327
328 *glp = gl;
329
330 return 0;
331
332 fail_aspace:
333 if (gl->gl_aspace)
334 gfs2_aspace_put(gl->gl_aspace);
335
336 fail:
337 kmem_cache_free(gfs2_glock_cachep, gl);
338
339 return error;
340}
341
342/**
343 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
344 * @gl: the glock
345 * @state: the state we're requesting
346 * @flags: the modifier flags
347 * @gh: the holder structure
348 *
349 */
350
351void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
352 struct gfs2_holder *gh)
353{
354 INIT_LIST_HEAD(&gh->gh_list);
355 gh->gh_gl = gl;
356 gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
357 gh->gh_state = state;
358 gh->gh_flags = flags;
359 gh->gh_error = 0;
360 gh->gh_iflags = 0;
361 init_completion(&gh->gh_wait);
362
363 if (gh->gh_state == LM_ST_EXCLUSIVE)
364 gh->gh_flags |= GL_LOCAL_EXCL;
365
366 gfs2_glock_hold(gl);
367}
368
369/**
370 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
371 * @state: the state we're requesting
372 * @flags: the modifier flags
373 * @gh: the holder structure
374 *
375 * Don't mess with the glock.
376 *
377 */
378
379void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
380{
381 gh->gh_state = state;
382 gh->gh_flags = flags;
383 if (gh->gh_state == LM_ST_EXCLUSIVE)
384 gh->gh_flags |= GL_LOCAL_EXCL;
385
386 gh->gh_iflags &= 1 << HIF_ALLOCED;
387}
388
389/**
390 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
391 * @gh: the holder structure
392 *
393 */
394
395void gfs2_holder_uninit(struct gfs2_holder *gh)
396{
397 gfs2_glock_put(gh->gh_gl);
398 gh->gh_gl = NULL;
399}
400
401/**
402 * gfs2_holder_get - get a struct gfs2_holder structure
403 * @gl: the glock
404 * @state: the state we're requesting
405 * @flags: the modifier flags
406 * @gfp_flags: __GFP_NOFAIL
407 *
408 * Figure out how big an impact this function has. Either:
409 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
410 * 2) Leave it like it is
411 *
412 * Returns: the holder structure, NULL on ENOMEM
413 */
414
415struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
416 int flags, gfp_t gfp_flags)
417{
418 struct gfs2_holder *gh;
419
420 gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
421 if (!gh)
422 return NULL;
423
424 gfs2_holder_init(gl, state, flags, gh);
425 set_bit(HIF_ALLOCED, &gh->gh_iflags);
426
427 return gh;
428}
429
430/**
431 * gfs2_holder_put - get rid of a struct gfs2_holder structure
432 * @gh: the holder structure
433 *
434 */
435
436void gfs2_holder_put(struct gfs2_holder *gh)
437{
438 gfs2_holder_uninit(gh);
439 kfree(gh);
440}
441
442/**
443 * handle_recurse - put other holder structures (marked recursive)
444 * into the holders list
445 * @gh: the holder structure
446 *
447 */
448
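/* Called with gl->gl_spin held.  Moves every waiter on gl_waiters3 owned
   by the same task as @gh (marked HIF_RECURSE by add_to_queue()) onto the
   holders list and completes it, so recursive requests are granted
   together. */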
449static void handle_recurse(struct gfs2_holder *gh)
450{
451 struct gfs2_glock *gl = gh->gh_gl;
452 struct gfs2_sbd *sdp = gl->gl_sbd;
453 struct gfs2_holder *tmp_gh, *safe;
454 int found = 0;
455
456 if (gfs2_assert_warn(sdp, gh->gh_owner))
457 return;
458
459 list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
460 if (tmp_gh->gh_owner != gh->gh_owner)
461 continue;
462
463 gfs2_assert_warn(sdp,
464 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
465
466 list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
467 tmp_gh->gh_error = 0;
468 set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);
469
470 complete(&tmp_gh->gh_wait);
471
472 found = 1;
473 }
474
475 gfs2_assert_warn(sdp, found);
476}
477
478/**
 479 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
480 * @gh: the holder
481 *
482 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
483 * If there is more than one, leave them alone.
484 *
485 */
486
487static void do_unrecurse(struct gfs2_holder *gh)
488{
489 struct gfs2_glock *gl = gh->gh_gl;
490 struct gfs2_sbd *sdp = gl->gl_sbd;
491 struct gfs2_holder *tmp_gh, *last_gh = NULL;
492 int found = 0;
493
494 if (gfs2_assert_warn(sdp, gh->gh_owner))
495 return;
496
497 list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
498 if (tmp_gh->gh_owner != gh->gh_owner)
499 continue;
500
501 gfs2_assert_warn(sdp,
502 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));
503
504 if (found)
505 return;
506
507 found = 1;
508 last_gh = tmp_gh;
509 }
510
511 if (!gfs2_assert_warn(sdp, found))
512 clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
513}
514
515/**
516 * rq_mutex - process a mutex request in the queue
517 * @gh: the glock holder
518 *
519 * Returns: 1 if the queue is blocked
520 */
521
522static int rq_mutex(struct gfs2_holder *gh)
523{
524 struct gfs2_glock *gl = gh->gh_gl;
525
526 list_del_init(&gh->gh_list);
527 /* gh->gh_error never examined. */
528 set_bit(GLF_LOCK, &gl->gl_flags);
529 complete(&gh->gh_wait);
530
531 return 1;
532}
533
534/**
535 * rq_promote - process a promote request in the queue
536 * @gh: the glock holder
537 *
538 * Acquire a new inter-node lock, or change a lock state to more restrictive.
539 *
540 * Returns: 1 if the queue is blocked
541 */
542
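/* Called from run_queue() with gl->gl_spin held.  If the glock isn't in
   a compatible state the request blocks; once no holders remain, a state
   change is started through go_xmote_th() with the spinlock dropped
   around the call (after reclaiming a couple of glocks if the reclaim
   list has grown too long).  Otherwise the holder is granted and any
   recursive holders queued behind it are woken as well. */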
543static int rq_promote(struct gfs2_holder *gh)
544{
545 struct gfs2_glock *gl = gh->gh_gl;
546 struct gfs2_sbd *sdp = gl->gl_sbd;
547 struct gfs2_glock_operations *glops = gl->gl_ops;
548 int recurse;
549
550 if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
551 if (list_empty(&gl->gl_holders)) {
552 gl->gl_req_gh = gh;
553 set_bit(GLF_LOCK, &gl->gl_flags);
554 spin_unlock(&gl->gl_spin);
555
556 if (atomic_read(&sdp->sd_reclaim_count) >
557 gfs2_tune_get(sdp, gt_reclaim_limit) &&
558 !(gh->gh_flags & LM_FLAG_PRIORITY)) {
559 gfs2_reclaim_glock(sdp);
560 gfs2_reclaim_glock(sdp);
561 }
562
563 glops->go_xmote_th(gl, gh->gh_state,
564 gh->gh_flags);
565
566 spin_lock(&gl->gl_spin);
567 }
568 return 1;
569 }
570
571 if (list_empty(&gl->gl_holders)) {
572 set_bit(HIF_FIRST, &gh->gh_iflags);
573 set_bit(GLF_LOCK, &gl->gl_flags);
574 recurse = 0;
575 } else {
576 struct gfs2_holder *next_gh;
577 if (gh->gh_flags & GL_LOCAL_EXCL)
578 return 1;
579 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
580 gh_list);
581 if (next_gh->gh_flags & GL_LOCAL_EXCL)
582 return 1;
583 recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
584 }
585
586 list_move_tail(&gh->gh_list, &gl->gl_holders);
587 gh->gh_error = 0;
588 set_bit(HIF_HOLDER, &gh->gh_iflags);
589
590 if (recurse)
591 handle_recurse(gh);
592
593 complete(&gh->gh_wait);
594
595 return 0;
596}
597
598/**
599 * rq_demote - process a demote request in the queue
600 * @gh: the glock holder
601 *
602 * Returns: 1 if the queue is blocked
603 */
604
605static int rq_demote(struct gfs2_holder *gh)
606{
607 struct gfs2_glock *gl = gh->gh_gl;
608 struct gfs2_glock_operations *glops = gl->gl_ops;
609
610 if (!list_empty(&gl->gl_holders))
611 return 1;
612
613 if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
614 list_del_init(&gh->gh_list);
615 gh->gh_error = 0;
616 spin_unlock(&gl->gl_spin);
617 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
618 gfs2_holder_put(gh);
619 else
620 complete(&gh->gh_wait);
621 spin_lock(&gl->gl_spin);
622 } else {
623 gl->gl_req_gh = gh;
624 set_bit(GLF_LOCK, &gl->gl_flags);
625 spin_unlock(&gl->gl_spin);
626
627 if (gh->gh_state == LM_ST_UNLOCKED ||
628 gl->gl_state != LM_ST_EXCLUSIVE)
629 glops->go_drop_th(gl);
630 else
631 glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
632
633 spin_lock(&gl->gl_spin);
634 }
635
636 return 0;
637}
638
639/**
640 * rq_greedy - process a queued request to drop greedy status
641 * @gh: the glock holder
642 *
643 * Returns: 1 if the queue is blocked
644 */
645
646static int rq_greedy(struct gfs2_holder *gh)
647{
648 struct gfs2_glock *gl = gh->gh_gl;
649
650 list_del_init(&gh->gh_list);
651 /* gh->gh_error never examined. */
652 clear_bit(GLF_GREEDY, &gl->gl_flags);
653 spin_unlock(&gl->gl_spin);
654
655 gfs2_holder_uninit(gh);
656 kfree(container_of(gh, struct greedy, gr_gh));
657
658 spin_lock(&gl->gl_spin);
659
660 return 0;
661}
662
663/**
664 * run_queue - process holder structures on a glock
665 * @gl: the glock
666 *
667 */
668
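/* Called with gl->gl_spin held.  The three wait queues are serviced in
   priority order: gl_waiters1 (glmutex requests), then gl_waiters2
   (demote/greedy requests, skipped while GLF_SKIP_WAITERS2 is set), then
   gl_waiters3 (promotions).  Processing stops as soon as a request blocks
   or another task holds GLF_LOCK. */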
669static void run_queue(struct gfs2_glock *gl)
670{
671 struct gfs2_holder *gh;
672 int blocked = 1;
673
674 for (;;) {
675 if (test_bit(GLF_LOCK, &gl->gl_flags))
676 break;
677
678 if (!list_empty(&gl->gl_waiters1)) {
679 gh = list_entry(gl->gl_waiters1.next,
680 struct gfs2_holder, gh_list);
681
682 if (test_bit(HIF_MUTEX, &gh->gh_iflags))
683 blocked = rq_mutex(gh);
684 else
685 gfs2_assert_warn(gl->gl_sbd, 0);
686
687 } else if (!list_empty(&gl->gl_waiters2) &&
688 !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
689 gh = list_entry(gl->gl_waiters2.next,
690 struct gfs2_holder, gh_list);
691
692 if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
693 blocked = rq_demote(gh);
694 else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
695 blocked = rq_greedy(gh);
696 else
697 gfs2_assert_warn(gl->gl_sbd, 0);
698
699 } else if (!list_empty(&gl->gl_waiters3)) {
700 gh = list_entry(gl->gl_waiters3.next,
701 struct gfs2_holder, gh_list);
702
703 if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
704 blocked = rq_promote(gh);
705 else
706 gfs2_assert_warn(gl->gl_sbd, 0);
707
708 } else
709 break;
710
711 if (blocked)
712 break;
713 }
714}
715
716/**
717 * gfs2_glmutex_lock - acquire a local lock on a glock
718 * @gl: the glock
719 *
720 * Gives caller exclusive access to manipulate a glock structure.
721 */
722
723void gfs2_glmutex_lock(struct gfs2_glock *gl)
724{
725 struct gfs2_holder gh;
726
727 gfs2_holder_init(gl, 0, 0, &gh);
728 set_bit(HIF_MUTEX, &gh.gh_iflags);
729
730 spin_lock(&gl->gl_spin);
731 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
732 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
733 else
734 complete(&gh.gh_wait);
735 spin_unlock(&gl->gl_spin);
736
737 wait_for_completion(&gh.gh_wait);
738 gfs2_holder_uninit(&gh);
739}
740
741/**
742 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
743 * @gl: the glock
744 *
745 * Returns: 1 if the glock is acquired
746 */
747
748int gfs2_glmutex_trylock(struct gfs2_glock *gl)
749{
750 int acquired = 1;
751
752 spin_lock(&gl->gl_spin);
753 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
754 acquired = 0;
755 spin_unlock(&gl->gl_spin);
756
757 return acquired;
758}
759
760/**
761 * gfs2_glmutex_unlock - release a local lock on a glock
762 * @gl: the glock
763 *
764 */
765
766void gfs2_glmutex_unlock(struct gfs2_glock *gl)
767{
768 spin_lock(&gl->gl_spin);
769 clear_bit(GLF_LOCK, &gl->gl_flags);
770 run_queue(gl);
771 spin_unlock(&gl->gl_spin);
772}
773
774/**
775 * handle_callback - add a demote request to a lock's queue
776 * @gl: the glock
777 * @state: the state the caller wants us to change to
778 *
779 */
780
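/* Queues a demote request on gl_waiters2.  An already-queued demote that
   isn't currently being serviced is reused (weakened to LM_ST_UNLOCKED if
   the requested states differ); otherwise a new self-freeing holder
   (HIF_DEMOTE | HIF_DEALLOC) is allocated outside the spinlock and the
   scan is retried. */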
781static void handle_callback(struct gfs2_glock *gl, unsigned int state)
782{
783 struct gfs2_holder *gh, *new_gh = NULL;
784
785 restart:
786 spin_lock(&gl->gl_spin);
787
788 list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
789 if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
790 gl->gl_req_gh != gh) {
791 if (gh->gh_state != state)
792 gh->gh_state = LM_ST_UNLOCKED;
793 goto out;
794 }
795 }
796
797 if (new_gh) {
798 list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
799 new_gh = NULL;
800 } else {
801 spin_unlock(&gl->gl_spin);
802
803 new_gh = gfs2_holder_get(gl, state,
804 LM_FLAG_TRY | GL_NEVER_RECURSE,
 805 GFP_KERNEL | __GFP_NOFAIL);
806 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
807 set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
808
809 goto restart;
810 }
811
812 out:
813 spin_unlock(&gl->gl_spin);
814
815 if (new_gh)
816 gfs2_holder_put(new_gh);
817}
818
819/**
820 * state_change - record that the glock is now in a different state
821 * @gl: the glock
 822 * @new_state: the new state
823 *
824 */
825
826static void state_change(struct gfs2_glock *gl, unsigned int new_state)
827{
828 struct gfs2_sbd *sdp = gl->gl_sbd;
829 int held1, held2;
830
831 held1 = (gl->gl_state != LM_ST_UNLOCKED);
832 held2 = (new_state != LM_ST_UNLOCKED);
833
834 if (held1 != held2) {
835 if (held2)
836 gfs2_glock_hold(gl);
837 else
838 gfs2_glock_put(gl);
839 }
840
841 gl->gl_state = new_state;
842}
843
844/**
845 * xmote_bh - Called after the lock module is done acquiring a lock
846 * @gl: The glock in question
847 * @ret: the int returned from the lock module
848 *
849 */
850
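/* Completion handler installed as gl->gl_req_bh by gfs2_glock_xmote_th()
   and run once the lock module has finished the state change.  Records
   the new state, invalidates cached data where the transition requires
   it, and resolves the requesting holder (if any) according to the lock
   module's return code. */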
851static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
852{
853 struct gfs2_sbd *sdp = gl->gl_sbd;
854 struct gfs2_glock_operations *glops = gl->gl_ops;
855 struct gfs2_holder *gh = gl->gl_req_gh;
856 int prev_state = gl->gl_state;
857 int op_done = 1;
858
859 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
860 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
861 gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
862
863 state_change(gl, ret & LM_OUT_ST_MASK);
864
865 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
866 if (glops->go_inval)
867 glops->go_inval(gl, DIO_METADATA | DIO_DATA);
868 } else if (gl->gl_state == LM_ST_DEFERRED) {
869 /* We might not want to do this here.
870 Look at moving to the inode glops. */
871 if (glops->go_inval)
872 glops->go_inval(gl, DIO_DATA);
873 }
874
875 /* Deal with each possible exit condition */
876
877 if (!gh)
878 gl->gl_stamp = jiffies;
879
880 else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
881 spin_lock(&gl->gl_spin);
882 list_del_init(&gh->gh_list);
883 gh->gh_error = -EIO;
884 if (test_bit(HIF_RECURSE, &gh->gh_iflags))
885 do_unrecurse(gh);
886 spin_unlock(&gl->gl_spin);
887
888 } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
889 spin_lock(&gl->gl_spin);
890 list_del_init(&gh->gh_list);
891 if (gl->gl_state == gh->gh_state ||
892 gl->gl_state == LM_ST_UNLOCKED)
893 gh->gh_error = 0;
894 else {
895 if (gfs2_assert_warn(sdp, gh->gh_flags &
896 (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
897 fs_warn(sdp, "ret = 0x%.8X\n", ret);
898 gh->gh_error = GLR_TRYFAILED;
899 }
900 spin_unlock(&gl->gl_spin);
901
902 if (ret & LM_OUT_CANCELED)
903 handle_callback(gl, LM_ST_UNLOCKED); /* Lame */
904
905 } else if (ret & LM_OUT_CANCELED) {
906 spin_lock(&gl->gl_spin);
907 list_del_init(&gh->gh_list);
908 gh->gh_error = GLR_CANCELED;
909 if (test_bit(HIF_RECURSE, &gh->gh_iflags))
910 do_unrecurse(gh);
911 spin_unlock(&gl->gl_spin);
912
913 } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
914 spin_lock(&gl->gl_spin);
915 list_move_tail(&gh->gh_list, &gl->gl_holders);
916 gh->gh_error = 0;
917 set_bit(HIF_HOLDER, &gh->gh_iflags);
918 spin_unlock(&gl->gl_spin);
919
920 set_bit(HIF_FIRST, &gh->gh_iflags);
921
922 op_done = 0;
923
924 } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
925 spin_lock(&gl->gl_spin);
926 list_del_init(&gh->gh_list);
927 gh->gh_error = GLR_TRYFAILED;
928 if (test_bit(HIF_RECURSE, &gh->gh_iflags))
929 do_unrecurse(gh);
930 spin_unlock(&gl->gl_spin);
931
932 } else {
933 if (gfs2_assert_withdraw(sdp, 0) == -1)
934 fs_err(sdp, "ret = 0x%.8X\n", ret);
935 }
936
937 if (glops->go_xmote_bh)
938 glops->go_xmote_bh(gl);
939
940 if (op_done) {
941 spin_lock(&gl->gl_spin);
942 gl->gl_req_gh = NULL;
943 gl->gl_req_bh = NULL;
944 clear_bit(GLF_LOCK, &gl->gl_flags);
945 run_queue(gl);
946 spin_unlock(&gl->gl_spin);
947 }
948
949 gfs2_glock_put(gl);
950
951 if (gh) {
952 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
953 gfs2_holder_put(gh);
954 else
955 complete(&gh->gh_wait);
956 }
957}
958
959/**
960 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
961 * @gl: The glock in question
962 * @state: the requested state
963 * @flags: modifier flags to the lock call
964 *
965 */
966
967void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
968{
969 struct gfs2_sbd *sdp = gl->gl_sbd;
970 struct gfs2_glock_operations *glops = gl->gl_ops;
971 int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
972 LM_FLAG_NOEXP | LM_FLAG_ANY |
973 LM_FLAG_PRIORITY);
974 unsigned int lck_ret;
975
976 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
977 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
978 gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
979 gfs2_assert_warn(sdp, state != gl->gl_state);
980
981 if (gl->gl_state == LM_ST_EXCLUSIVE) {
982 if (glops->go_sync)
983 glops->go_sync(gl,
984 DIO_METADATA | DIO_DATA | DIO_RELEASE);
985 }
986
987 gfs2_glock_hold(gl);
988 gl->gl_req_bh = xmote_bh;
989
990 lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
991 lck_flags);
992
993 if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
994 return;
995
996 if (lck_ret & LM_OUT_ASYNC)
997 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
998 else
999 xmote_bh(gl, lck_ret);
1000}
1001
1002/**
1003 * drop_bh - Called after a lock module unlock completes
1004 * @gl: the glock
1005 * @ret: the return status
1006 *
1007 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
1008 * Doesn't drop the reference on the glock the top half took out
1009 *
1010 */
1011
1012static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
1013{
1014 struct gfs2_sbd *sdp = gl->gl_sbd;
1015 struct gfs2_glock_operations *glops = gl->gl_ops;
1016 struct gfs2_holder *gh = gl->gl_req_gh;
1017
1018 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1019
1020 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1021 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
1022 gfs2_assert_warn(sdp, !ret);
1023
1024 state_change(gl, LM_ST_UNLOCKED);
1025
1026 if (glops->go_inval)
1027 glops->go_inval(gl, DIO_METADATA | DIO_DATA);
1028
1029 if (gh) {
1030 spin_lock(&gl->gl_spin);
1031 list_del_init(&gh->gh_list);
1032 gh->gh_error = 0;
1033 spin_unlock(&gl->gl_spin);
1034 }
1035
1036 if (glops->go_drop_bh)
1037 glops->go_drop_bh(gl);
1038
1039 spin_lock(&gl->gl_spin);
1040 gl->gl_req_gh = NULL;
1041 gl->gl_req_bh = NULL;
1042 clear_bit(GLF_LOCK, &gl->gl_flags);
1043 run_queue(gl);
1044 spin_unlock(&gl->gl_spin);
1045
1046 gfs2_glock_put(gl);
1047
1048 if (gh) {
1049 if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
1050 gfs2_holder_put(gh);
1051 else
1052 complete(&gh->gh_wait);
1053 }
1054}
1055
1056/**
1057 * gfs2_glock_drop_th - call into the lock module to unlock a lock
1058 * @gl: the glock
1059 *
1060 */
1061
1062void gfs2_glock_drop_th(struct gfs2_glock *gl)
1063{
1064 struct gfs2_sbd *sdp = gl->gl_sbd;
1065 struct gfs2_glock_operations *glops = gl->gl_ops;
1066 unsigned int ret;
1067
1068 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1069 gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
1070 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1071
1072 if (gl->gl_state == LM_ST_EXCLUSIVE) {
1073 if (glops->go_sync)
1074 glops->go_sync(gl,
1075 DIO_METADATA | DIO_DATA | DIO_RELEASE);
1076 }
1077
1078 gfs2_glock_hold(gl);
1079 gl->gl_req_bh = drop_bh;
1080
1081 ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
1082
1083 if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
1084 return;
1085
1086 if (!ret)
1087 drop_bh(gl, ret);
1088 else
1089 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
1090}
1091
1092/**
1093 * do_cancels - cancel requests for locks stuck waiting on an expire flag
1094 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
1095 *
1096 * Don't cancel GL_NOCANCEL requests.
1097 */
1098
1099static void do_cancels(struct gfs2_holder *gh)
1100{
1101 struct gfs2_glock *gl = gh->gh_gl;
1102
1103 spin_lock(&gl->gl_spin);
1104
1105 while (gl->gl_req_gh != gh &&
1106 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1107 !list_empty(&gh->gh_list)) {
1108 if (gl->gl_req_bh &&
1109 !(gl->gl_req_gh &&
1110 (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
1111 spin_unlock(&gl->gl_spin);
1112 gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
1113 msleep(100);
1114 spin_lock(&gl->gl_spin);
1115 } else {
1116 spin_unlock(&gl->gl_spin);
1117 msleep(100);
1118 spin_lock(&gl->gl_spin);
1119 }
1120 }
1121
1122 spin_unlock(&gl->gl_spin);
1123}
1124
1125/**
1126 * glock_wait_internal - wait on a glock acquisition
1127 * @gh: the glock holder
1128 *
1129 * Returns: 0 on success
1130 */
1131
1132static int glock_wait_internal(struct gfs2_holder *gh)
1133{
1134 struct gfs2_glock *gl = gh->gh_gl;
1135 struct gfs2_sbd *sdp = gl->gl_sbd;
1136 struct gfs2_glock_operations *glops = gl->gl_ops;
1137
1138 if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1139 return -EIO;
1140
1141 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1142 spin_lock(&gl->gl_spin);
1143 if (gl->gl_req_gh != gh &&
1144 !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1145 !list_empty(&gh->gh_list)) {
1146 list_del_init(&gh->gh_list);
1147 gh->gh_error = GLR_TRYFAILED;
1148 if (test_bit(HIF_RECURSE, &gh->gh_iflags))
1149 do_unrecurse(gh);
1150 run_queue(gl);
1151 spin_unlock(&gl->gl_spin);
1152 return gh->gh_error;
1153 }
1154 spin_unlock(&gl->gl_spin);
1155 }
1156
1157 if (gh->gh_flags & LM_FLAG_PRIORITY)
1158 do_cancels(gh);
1159
1160 wait_for_completion(&gh->gh_wait);
1161
1162 if (gh->gh_error)
1163 return gh->gh_error;
1164
1165 gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1166 gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
1167 gh->gh_state,
1168 gh->gh_flags));
1169
1170 if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1171 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1172
1173 if (glops->go_lock) {
1174 gh->gh_error = glops->go_lock(gh);
1175 if (gh->gh_error) {
1176 spin_lock(&gl->gl_spin);
1177 list_del_init(&gh->gh_list);
1178 if (test_and_clear_bit(HIF_RECURSE,
1179 &gh->gh_iflags))
1180 do_unrecurse(gh);
1181 spin_unlock(&gl->gl_spin);
1182 }
1183 }
1184
1185 spin_lock(&gl->gl_spin);
1186 gl->gl_req_gh = NULL;
1187 gl->gl_req_bh = NULL;
1188 clear_bit(GLF_LOCK, &gl->gl_flags);
1189 if (test_bit(HIF_RECURSE, &gh->gh_iflags))
1190 handle_recurse(gh);
1191 run_queue(gl);
1192 spin_unlock(&gl->gl_spin);
1193 }
1194
1195 return gh->gh_error;
1196}
1197
1198static inline struct gfs2_holder *
1199find_holder_by_owner(struct list_head *head, struct task_struct *owner)
1200{
1201 struct gfs2_holder *gh;
1202
1203 list_for_each_entry(gh, head, gh_list) {
1204 if (gh->gh_owner == owner)
1205 return gh;
1206 }
1207
1208 return NULL;
1209}
1210
1211/**
 1212 * recurse_check - validate a recursive lock request
 1213 * @existing: the holder this task already has on the glock
 1214 * @new: the holder being added
 1215 * @state: the lock state to validate the new request against
 1216 *
1214 * Make sure the new holder is compatible with the pre-existing one.
1215 *
1216 */
1217
1218static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
1219 unsigned int state)
1220{
1221 struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;
1222
1223 if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
1224 !(existing->gh_flags & LM_FLAG_ANY)))
1225 goto fail;
1226
1227 if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
1228 !(new->gh_flags & GL_LOCAL_EXCL)))
1229 goto fail;
1230
1231 if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
1232 new->gh_flags)))
1233 goto fail;
1234
1235 return 0;
1236
1237 fail:
1238 set_bit(HIF_ABORTED, &new->gh_iflags);
1239 return -EINVAL;
1240}
1241
1242/**
1243 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1244 * @gh: the holder structure to add
1245 *
1246 */
1247
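/* Called with gl->gl_spin held.  A request from a task that already
   holds this glock is granted immediately once recurse_check() passes;
   a request from a task that is already waiting is tagged HIF_RECURSE so
   both are granted together later.  LM_FLAG_PRIORITY requests go to the
   head of gl_waiters3 rather than the tail. */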
1248static void add_to_queue(struct gfs2_holder *gh)
1249{
1250 struct gfs2_glock *gl = gh->gh_gl;
1251 struct gfs2_holder *existing;
1252
1253 if (!gh->gh_owner)
1254 goto out;
1255
1256 existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1257 if (existing) {
1258 if (recurse_check(existing, gh, gl->gl_state))
1259 return;
1260
1261 list_add_tail(&gh->gh_list, &gl->gl_holders);
1262 set_bit(HIF_HOLDER, &gh->gh_iflags);
1263
1264 gh->gh_error = 0;
1265 complete(&gh->gh_wait);
1266
1267 return;
1268 }
1269
1270 existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
1271 if (existing) {
1272 if (recurse_check(existing, gh, existing->gh_state))
1273 return;
1274
1275 set_bit(HIF_RECURSE, &gh->gh_iflags);
1276 set_bit(HIF_RECURSE, &existing->gh_iflags);
1277
1278 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1279
1280 return;
1281 }
1282
1283 out:
1284 if (gh->gh_flags & LM_FLAG_PRIORITY)
1285 list_add(&gh->gh_list, &gl->gl_waiters3);
1286 else
1287 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1288}
1289
1290/**
1291 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1292 * @gh: the holder structure
1293 *
1294 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1295 *
1296 * Returns: 0, GLR_TRYFAILED, or errno on failure
1297 */
1298
1299int gfs2_glock_nq(struct gfs2_holder *gh)
1300{
1301 struct gfs2_glock *gl = gh->gh_gl;
1302 struct gfs2_sbd *sdp = gl->gl_sbd;
1303 int error = 0;
1304
1305 restart:
1306 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1307 set_bit(HIF_ABORTED, &gh->gh_iflags);
1308 return -EIO;
1309 }
1310
1311 set_bit(HIF_PROMOTE, &gh->gh_iflags);
1312
1313 spin_lock(&gl->gl_spin);
1314 add_to_queue(gh);
1315 run_queue(gl);
1316 spin_unlock(&gl->gl_spin);
1317
1318 if (!(gh->gh_flags & GL_ASYNC)) {
1319 error = glock_wait_internal(gh);
1320 if (error == GLR_CANCELED) {
1321 msleep(1000);
1322 goto restart;
1323 }
1324 }
1325
1326 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1327
1328 return error;
1329}
1330
1331/**
1332 * gfs2_glock_poll - poll to see if an async request has been completed
1333 * @gh: the holder
1334 *
1335 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1336 */
1337
1338int gfs2_glock_poll(struct gfs2_holder *gh)
1339{
1340 struct gfs2_glock *gl = gh->gh_gl;
1341 int ready = 0;
1342
1343 spin_lock(&gl->gl_spin);
1344
1345 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1346 ready = 1;
1347 else if (list_empty(&gh->gh_list)) {
1348 if (gh->gh_error == GLR_CANCELED) {
1349 spin_unlock(&gl->gl_spin);
1350 msleep(1000);
1351 if (gfs2_glock_nq(gh))
1352 return 1;
1353 return 0;
1354 } else
1355 ready = 1;
1356 }
1357
1358 spin_unlock(&gl->gl_spin);
1359
1360 return ready;
1361}
1362
1363/**
1364 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1365 * @gh: the holder structure
1366 *
1367 * Returns: 0, GLR_TRYFAILED, or errno on failure
1368 */
1369
1370int gfs2_glock_wait(struct gfs2_holder *gh)
1371{
1372 int error;
1373
1374 error = glock_wait_internal(gh);
1375 if (error == GLR_CANCELED) {
1376 msleep(1000);
1377 gh->gh_flags &= ~GL_ASYNC;
1378 error = gfs2_glock_nq(gh);
1379 }
1380
1381 return error;
1382}
1383
1384/**
1385 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1386 * @gh: the glock holder
1387 *
1388 */
1389
1390void gfs2_glock_dq(struct gfs2_holder *gh)
1391{
1392 struct gfs2_glock *gl = gh->gh_gl;
1393 struct gfs2_sbd *sdp = gl->gl_sbd;
1394 struct gfs2_glock_operations *glops = gl->gl_ops;
1395
1396 if (gh->gh_flags & GL_SYNC)
1397 set_bit(GLF_SYNC, &gl->gl_flags);
1398
1399 if (gh->gh_flags & GL_NOCACHE)
1400 handle_callback(gl, LM_ST_UNLOCKED);
1401
1402 gfs2_glmutex_lock(gl);
1403
1404 spin_lock(&gl->gl_spin);
1405 list_del_init(&gh->gh_list);
1406
1407 if (list_empty(&gl->gl_holders)) {
1408 spin_unlock(&gl->gl_spin);
1409
1410 if (glops->go_unlock)
1411 glops->go_unlock(gh);
1412
1413 if (test_bit(GLF_SYNC, &gl->gl_flags)) {
1414 if (glops->go_sync)
1415 glops->go_sync(gl, DIO_METADATA | DIO_DATA);
1416 }
1417
1418 gl->gl_stamp = jiffies;
1419
1420 spin_lock(&gl->gl_spin);
1421 }
1422
1423 clear_bit(GLF_LOCK, &gl->gl_flags);
1424 run_queue(gl);
1425 spin_unlock(&gl->gl_spin);
1426}
1427
1428/**
1429 * gfs2_glock_prefetch - Try to prefetch a glock
1430 * @gl: the glock
1431 * @state: the state to prefetch in
1432 * @flags: flags passed to go_xmote_th()
1433 *
1434 */
1435
1436void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
1437{
1438 struct gfs2_glock_operations *glops = gl->gl_ops;
1439
1440 spin_lock(&gl->gl_spin);
1441
1442 if (test_bit(GLF_LOCK, &gl->gl_flags) ||
1443 !list_empty(&gl->gl_holders) ||
1444 !list_empty(&gl->gl_waiters1) ||
1445 !list_empty(&gl->gl_waiters2) ||
1446 !list_empty(&gl->gl_waiters3) ||
1447 relaxed_state_ok(gl->gl_state, state, flags)) {
1448 spin_unlock(&gl->gl_spin);
1449 return;
1450 }
1451
1452 set_bit(GLF_PREFETCH, &gl->gl_flags);
1453 set_bit(GLF_LOCK, &gl->gl_flags);
1454 spin_unlock(&gl->gl_spin);
1455
1456 glops->go_xmote_th(gl, state, flags);
1457}
1458
1459/**
1460 * gfs2_glock_force_drop - Force a glock to be uncached
1461 * @gl: the glock
1462 *
1463 */
1464
1465void gfs2_glock_force_drop(struct gfs2_glock *gl)
1466{
1467 struct gfs2_holder gh;
1468
1469 gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
1470 set_bit(HIF_DEMOTE, &gh.gh_iflags);
1471
1472 spin_lock(&gl->gl_spin);
1473 list_add_tail(&gh.gh_list, &gl->gl_waiters2);
1474 run_queue(gl);
1475 spin_unlock(&gl->gl_spin);
1476
1477 wait_for_completion(&gh.gh_wait);
1478 gfs2_holder_uninit(&gh);
1479}
1480
1481static void greedy_work(void *data)
1482{
1483 struct greedy *gr = (struct greedy *)data;
1484 struct gfs2_holder *gh = &gr->gr_gh;
1485 struct gfs2_glock *gl = gh->gh_gl;
1486 struct gfs2_glock_operations *glops = gl->gl_ops;
1487
1488 clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1489
1490 if (glops->go_greedy)
1491 glops->go_greedy(gl);
1492
1493 spin_lock(&gl->gl_spin);
1494
1495 if (list_empty(&gl->gl_waiters2)) {
1496 clear_bit(GLF_GREEDY, &gl->gl_flags);
1497 spin_unlock(&gl->gl_spin);
1498 gfs2_holder_uninit(gh);
1499 kfree(gr);
1500 } else {
1501 gfs2_glock_hold(gl);
1502 list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1503 run_queue(gl);
1504 spin_unlock(&gl->gl_spin);
1505 gfs2_glock_put(gl);
1506 }
1507}
1508
1509/**
 1510 * gfs2_glock_be_greedy - defer queued demote requests on a glock for a while
 1511 * @gl: the glock
 1512 * @time: the delay, in jiffies, before the greedy status is dropped
1513 *
1514 * Returns: 0 if go_greedy will be called, 1 otherwise
1515 */
1516
1517int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1518{
1519 struct greedy *gr;
1520 struct gfs2_holder *gh;
1521
1522 if (!time ||
1523 gl->gl_sbd->sd_args.ar_localcaching ||
1524 test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1525 return 1;
1526
1527 gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1528 if (!gr) {
1529 clear_bit(GLF_GREEDY, &gl->gl_flags);
1530 return 1;
1531 }
1532 gh = &gr->gr_gh;
1533
1534 gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
1535 set_bit(HIF_GREEDY, &gh->gh_iflags);
1536 INIT_WORK(&gr->gr_work, greedy_work, gr);
1537
1538 set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1539 schedule_delayed_work(&gr->gr_work, time);
1540
1541 return 0;
1542}
1543
1544/**
 1545 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
1546 * @gl: the glock
1547 * @state: the state we're requesting
1548 * @flags: the modifier flags
1549 * @gh: the holder structure
1550 *
1551 * Returns: 0, GLR_*, or errno
1552 */
1553
1554int gfs2_glock_nq_init(struct gfs2_glock *gl, unsigned int state, int flags,
1555 struct gfs2_holder *gh)
1556{
1557 int error;
1558
1559 gfs2_holder_init(gl, state, flags, gh);
1560
1561 error = gfs2_glock_nq(gh);
1562 if (error)
1563 gfs2_holder_uninit(gh);
1564
1565 return error;
1566}
1567
1568/**
1569 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1570 * @gh: the holder structure
1571 *
1572 */
1573
1574void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1575{
1576 gfs2_glock_dq(gh);
1577 gfs2_holder_uninit(gh);
1578}
1579
1580/**
1581 * gfs2_glock_nq_num - acquire a glock based on lock number
1582 * @sdp: the filesystem
1583 * @number: the lock number
1584 * @glops: the glock operations for the type of glock
1585 * @state: the state to acquire the glock in
 1586 * @flags: modifier flags for the acquisition
1587 * @gh: the struct gfs2_holder
1588 *
1589 * Returns: errno
1590 */
1591
1592int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
1593 struct gfs2_glock_operations *glops, unsigned int state,
1594 int flags, struct gfs2_holder *gh)
1595{
1596 struct gfs2_glock *gl;
1597 int error;
1598
1599 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1600 if (!error) {
1601 error = gfs2_glock_nq_init(gl, state, flags, gh);
1602 gfs2_glock_put(gl);
1603 }
1604
1605 return error;
1606}
1607
1608/**
1609 * glock_compare - Compare two struct gfs2_glock structures for sorting
1610 * @arg_a: the first structure
1611 * @arg_b: the second structure
1612 *
1613 */
1614
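/* Sort helper for nq_m_sync(): orders holders by lock number and, for
   equal numbers, puts LM_ST_EXCLUSIVE (and GL_LOCAL_EXCL) requests first,
   giving every caller the same global acquisition order and so avoiding
   deadlock. */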
1615static int glock_compare(const void *arg_a, const void *arg_b)
1616{
1617 struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
1618 struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
1619 struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1620 struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1621 int ret = 0;
1622
1623 if (a->ln_number > b->ln_number)
1624 ret = 1;
1625 else if (a->ln_number < b->ln_number)
1626 ret = -1;
1627 else {
1628 if (gh_a->gh_state == LM_ST_SHARED &&
1629 gh_b->gh_state == LM_ST_EXCLUSIVE)
1630 ret = 1;
1631 else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
1632 (gh_b->gh_flags & GL_LOCAL_EXCL))
1633 ret = 1;
1634 }
1635
1636 return ret;
1637}
1638
1639/**
 1640 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1641 * @num_gh: the number of structures
1642 * @ghs: an array of struct gfs2_holder structures
1643 *
1644 * Returns: 0 on success (all glocks acquired),
1645 * errno on failure (no glocks acquired)
1646 */
1647
1648static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1649 struct gfs2_holder **p)
1650{
1651 unsigned int x;
1652 int error = 0;
1653
1654 for (x = 0; x < num_gh; x++)
1655 p[x] = &ghs[x];
1656
1657 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1658
1659 for (x = 0; x < num_gh; x++) {
1660 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1661
1662 error = gfs2_glock_nq(p[x]);
1663 if (error) {
1664 while (x--)
1665 gfs2_glock_dq(p[x]);
1666 break;
1667 }
1668 }
1669
1670 return error;
1671}
1672
1673/**
1674 * gfs2_glock_nq_m - acquire multiple glocks
1675 * @num_gh: the number of structures
1676 * @ghs: an array of struct gfs2_holder structures
1677 *
1678 * Figure out how big an impact this function has. Either:
1679 * 1) Replace this code with code that calls gfs2_glock_prefetch()
1680 * 2) Forget async stuff and just call nq_m_sync()
1681 * 3) Leave it like it is
1682 *
1683 * Returns: 0 on success (all glocks acquired),
1684 * errno on failure (no glocks acquired)
1685 */
1686
1687int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1688{
1689 int *e;
1690 unsigned int x;
1691 int borked = 0, serious = 0;
1692 int error = 0;
1693
1694 if (!num_gh)
1695 return 0;
1696
1697 if (num_gh == 1) {
1698 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1699 return gfs2_glock_nq(ghs);
1700 }
1701
1702 e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1703 if (!e)
1704 return -ENOMEM;
1705
1706 for (x = 0; x < num_gh; x++) {
1707 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1708 error = gfs2_glock_nq(&ghs[x]);
1709 if (error) {
1710 borked = 1;
1711 serious = error;
1712 num_gh = x;
1713 break;
1714 }
1715 }
1716
1717 for (x = 0; x < num_gh; x++) {
1718 error = e[x] = glock_wait_internal(&ghs[x]);
1719 if (error) {
1720 borked = 1;
1721 if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1722 serious = error;
1723 }
1724 }
1725
1726 if (!borked) {
1727 kfree(e);
1728 return 0;
1729 }
1730
1731 for (x = 0; x < num_gh; x++)
1732 if (!e[x])
1733 gfs2_glock_dq(&ghs[x]);
1734
1735 if (serious)
1736 error = serious;
1737 else {
1738 for (x = 0; x < num_gh; x++)
1739 gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1740 &ghs[x]);
1741 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1742 }
1743
1744 kfree(e);
1745
1746 return error;
1747}
1748
1749/**
1750 * gfs2_glock_dq_m - release multiple glocks
1751 * @num_gh: the number of structures
1752 * @ghs: an array of struct gfs2_holder structures
1753 *
1754 */
1755
1756void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1757{
1758 unsigned int x;
1759
1760 for (x = 0; x < num_gh; x++)
1761 gfs2_glock_dq(&ghs[x]);
1762}
1763
1764/**
1765 * gfs2_glock_dq_uninit_m - release multiple glocks
1766 * @num_gh: the number of structures
1767 * @ghs: an array of struct gfs2_holder structures
1768 *
1769 */
1770
1771void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1772{
1773 unsigned int x;
1774
1775 for (x = 0; x < num_gh; x++)
1776 gfs2_glock_dq_uninit(&ghs[x]);
1777}
1778
1779/**
1780 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1781 * @sdp: the filesystem
1782 * @number: the lock number
1783 * @glops: the glock operations for the type of glock
1784 * @state: the state to acquire the glock in
 1785 * @flags: modifier flags for the acquisition
 1786 *
1788 */
1789
1790void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
1791 struct gfs2_glock_operations *glops,
1792 unsigned int state, int flags)
1793{
1794 struct gfs2_glock *gl;
1795 int error;
1796
1797 if (atomic_read(&sdp->sd_reclaim_count) <
1798 gfs2_tune_get(sdp, gt_reclaim_limit)) {
1799 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1800 if (!error) {
1801 gfs2_glock_prefetch(gl, state, flags);
1802 gfs2_glock_put(gl);
1803 }
1804 }
1805}
1806
1807/**
 1808 * gfs2_lvb_hold - attach a LVB to a glock
1809 * @gl: The glock in question
1810 *
1811 */
1812
1813int gfs2_lvb_hold(struct gfs2_glock *gl)
1814{
1815 int error;
1816
1817 gfs2_glmutex_lock(gl);
1818
1819 if (!atomic_read(&gl->gl_lvb_count)) {
1820 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1821 if (error) {
1822 gfs2_glmutex_unlock(gl);
1823 return error;
1824 }
1825 gfs2_glock_hold(gl);
1826 }
1827 atomic_inc(&gl->gl_lvb_count);
1828
1829 gfs2_glmutex_unlock(gl);
1830
1831 return 0;
1832}
1833
1834/**
1835 * gfs2_lvb_unhold - detach a LVB from a glock
1836 * @gl: The glock in question
1837 *
1838 */
1839
1840void gfs2_lvb_unhold(struct gfs2_glock *gl)
1841{
1842 gfs2_glock_hold(gl);
1843 gfs2_glmutex_lock(gl);
1844
1845 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1846 if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1847 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1848 gl->gl_lvb = NULL;
1849 gfs2_glock_put(gl);
1850 }
1851
1852 gfs2_glmutex_unlock(gl);
1853 gfs2_glock_put(gl);
1854}
1855
1856void gfs2_lvb_sync(struct gfs2_glock *gl)
1857{
1858 gfs2_glmutex_lock(gl);
1859
1860 gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
1861 if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
1862 gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1863
1864 gfs2_glmutex_unlock(gl);
1865}
1866
1867static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1868 unsigned int state)
1869{
1870 struct gfs2_glock *gl;
1871
1872 gl = gfs2_glock_find(sdp, name);
1873 if (!gl)
1874 return;
1875
1876 if (gl->gl_ops->go_callback)
1877 gl->gl_ops->go_callback(gl, state);
1878 handle_callback(gl, state);
1879
1880 spin_lock(&gl->gl_spin);
1881 run_queue(gl);
1882 spin_unlock(&gl->gl_spin);
1883
1884 gfs2_glock_put(gl);
1885}
1886
1887/**
1888 * gfs2_glock_cb - Callback used by locking module
1889 * @fsdata: Pointer to the superblock
1890 * @type: Type of callback
1891 * @data: Type dependent data pointer
1892 *
1893 * Called by the locking module when it wants to tell us something.
1894 * Either we need to drop a lock, one of our ASYNC requests completed, or
1895 * a journal from another client needs to be recovered.
1896 */
1897
1898void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
1899{
1900 struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;
1901
1902 switch (type) {
1903 case LM_CB_NEED_E:
1904 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
1905 return;
1906
1907 case LM_CB_NEED_D:
1908 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
1909 return;
1910
1911 case LM_CB_NEED_S:
1912 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
1913 return;
1914
1915 case LM_CB_ASYNC: {
1916 struct lm_async_cb *async = (struct lm_async_cb *)data;
1917 struct gfs2_glock *gl;
1918
1919 gl = gfs2_glock_find(sdp, &async->lc_name);
1920 if (gfs2_assert_warn(sdp, gl))
1921 return;
1922 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1923 gl->gl_req_bh(gl, async->lc_ret);
1924 gfs2_glock_put(gl);
1925
1926 return;
1927 }
1928
1929 case LM_CB_NEED_RECOVERY:
1930 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1931 if (sdp->sd_recoverd_process)
1932 wake_up_process(sdp->sd_recoverd_process);
1933 return;
1934
1935 case LM_CB_DROPLOCKS:
1936 gfs2_gl_hash_clear(sdp, NO_WAIT);
1937 gfs2_quota_scan(sdp);
1938 return;
1939
1940 default:
1941 gfs2_assert_warn(sdp, 0);
1942 return;
1943 }
1944}
1945
1946/**
1947 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 1948 * @sdp: the filesystem
 1949 * @inum: the inode number
1950 *
1951 */
1952
1953void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
1954{
1955 struct gfs2_glock *gl;
1956 struct gfs2_inode *ip;
1957 int error;
1958
1959 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
1960 NO_CREATE, &gl);
1961 if (error || !gl)
1962 return;
1963
1964 if (!gfs2_glmutex_trylock(gl))
1965 goto out;
1966
1967 ip = get_gl2ip(gl);
1968 if (!ip)
1969 goto out_unlock;
1970
1971 if (atomic_read(&ip->i_count))
1972 goto out_unlock;
1973
1974 gfs2_inode_destroy(ip);
1975
1976 out_unlock:
1977 gfs2_glmutex_unlock(gl);
1978
1979 out:
1980 gfs2_glock_put(gl);
1981}
1982
1983/**
1984 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
1985 * iopen glock from memory
1986 * @io_gl: the iopen glock
1987 * @state: the state into which the glock should be put
1988 *
1989 */
1990
1991void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
1992{
1993 struct gfs2_glock *i_gl;
1994
1995 if (state != LM_ST_UNLOCKED)
1996 return;
1997
1998 spin_lock(&io_gl->gl_spin);
1999 i_gl = get_gl2gl(io_gl);
2000 if (i_gl) {
2001 gfs2_glock_hold(i_gl);
2002 spin_unlock(&io_gl->gl_spin);
2003 } else {
2004 spin_unlock(&io_gl->gl_spin);
2005 return;
2006 }
2007
2008 if (gfs2_glmutex_trylock(i_gl)) {
2009 struct gfs2_inode *ip = get_gl2ip(i_gl);
2010 if (ip) {
2011 gfs2_try_toss_vnode(ip);
2012 gfs2_glmutex_unlock(i_gl);
2013 gfs2_glock_schedule_for_reclaim(i_gl);
2014 goto out;
2015 }
2016 gfs2_glmutex_unlock(i_gl);
2017 }
2018
2019 out:
2020 gfs2_glock_put(i_gl);
2021}
2022
2023/**
2024 * demote_ok - Check to see if it's ok to unlock a glock
2025 * @gl: the glock
2026 *
2027 * Returns: 1 if it's ok
2028 */
2029
2030static int demote_ok(struct gfs2_glock *gl)
2031{
2032 struct gfs2_sbd *sdp = gl->gl_sbd;
2033 struct gfs2_glock_operations *glops = gl->gl_ops;
2034 int demote = 1;
2035
2036 if (test_bit(GLF_STICKY, &gl->gl_flags))
2037 demote = 0;
2038 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
2039 demote = time_after_eq(jiffies,
2040 gl->gl_stamp +
2041 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
2042 else if (glops->go_demote_ok)
2043 demote = glops->go_demote_ok(gl);
2044
2045 return demote;
2046}
2047
2048/**
2049 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
2050 * @gl: the glock
2051 *
2052 */
2053
2054void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
2055{
2056 struct gfs2_sbd *sdp = gl->gl_sbd;
2057
2058 spin_lock(&sdp->sd_reclaim_lock);
2059 if (list_empty(&gl->gl_reclaim)) {
2060 gfs2_glock_hold(gl);
2061 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
2062 atomic_inc(&sdp->sd_reclaim_count);
2063 }
2064 spin_unlock(&sdp->sd_reclaim_lock);
2065
2066 wake_up(&sdp->sd_reclaim_wq);
2067}
2068
2069/**
2070 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
2071 * @sdp: the filesystem
2072 *
2073 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
2074 * different glock and we notice that there are a lot of glocks in the
2075 * reclaim list.
2076 *
2077 */
2078
2079void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
2080{
2081 struct gfs2_glock *gl;
2082
2083 spin_lock(&sdp->sd_reclaim_lock);
2084 if (list_empty(&sdp->sd_reclaim_list)) {
2085 spin_unlock(&sdp->sd_reclaim_lock);
2086 return;
2087 }
2088 gl = list_entry(sdp->sd_reclaim_list.next,
2089 struct gfs2_glock, gl_reclaim);
2090 list_del_init(&gl->gl_reclaim);
2091 spin_unlock(&sdp->sd_reclaim_lock);
2092
2093 atomic_dec(&sdp->sd_reclaim_count);
2094 atomic_inc(&sdp->sd_reclaimed);
2095
2096 if (gfs2_glmutex_trylock(gl)) {
2097 if (gl->gl_ops == &gfs2_inode_glops) {
2098 struct gfs2_inode *ip = get_gl2ip(gl);
2099 if (ip && !atomic_read(&ip->i_count))
2100 gfs2_inode_destroy(ip);
2101 }
2102 if (queue_empty(gl, &gl->gl_holders) &&
2103 gl->gl_state != LM_ST_UNLOCKED &&
2104 demote_ok(gl))
2105 handle_callback(gl, LM_ST_UNLOCKED);
2106 gfs2_glmutex_unlock(gl);
2107 }
2108
2109 gfs2_glock_put(gl);
2110}
2111
2112/**
 2113 * examine_bucket - Call a function for each glock in a hash bucket
2114 * @examiner: the function
2115 * @sdp: the filesystem
2116 * @bucket: the bucket
2117 *
2118 * Returns: 1 if the bucket has entries
2119 */
2120
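/* Walks one hash bucket using a dummy "plug" entry as a cursor so that
   the bucket's write lock can be dropped while the examiner runs on each
   real glock; plugs are recognized (and skipped) everywhere else via the
   GLF_PLUG flag. */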
2121static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
2122 struct gfs2_gl_hash_bucket *bucket)
2123{
2124 struct glock_plug plug;
2125 struct list_head *tmp;
2126 struct gfs2_glock *gl;
2127 int entries;
2128
2129 /* Add "plug" to end of bucket list, work back up list from there */
2130 memset(&plug.gl_flags, 0, sizeof(unsigned long));
2131 set_bit(GLF_PLUG, &plug.gl_flags);
2132
2133 write_lock(&bucket->hb_lock);
2134 list_add(&plug.gl_list, &bucket->hb_list);
2135 write_unlock(&bucket->hb_lock);
2136
2137 for (;;) {
2138 write_lock(&bucket->hb_lock);
2139
2140 for (;;) {
2141 tmp = plug.gl_list.next;
2142
2143 if (tmp == &bucket->hb_list) {
2144 list_del(&plug.gl_list);
2145 entries = !list_empty(&bucket->hb_list);
2146 write_unlock(&bucket->hb_lock);
2147 return entries;
2148 }
2149 gl = list_entry(tmp, struct gfs2_glock, gl_list);
2150
2151 /* Move plug up list */
2152 list_move(&plug.gl_list, &gl->gl_list);
2153
2154 if (test_bit(GLF_PLUG, &gl->gl_flags))
2155 continue;
2156
2157 /* examiner() must glock_put() */
2158 gfs2_glock_hold(gl);
2159
2160 break;
2161 }
2162
2163 write_unlock(&bucket->hb_lock);
2164
2165 examiner(gl);
2166 }
2167}
2168
2169/**
2170 * scan_glock - look at a glock and see if we can reclaim it
2171 * @gl: the glock to look at
2172 *
2173 */
2174
2175static void scan_glock(struct gfs2_glock *gl)
2176{
2177 if (gfs2_glmutex_trylock(gl)) {
2178 if (gl->gl_ops == &gfs2_inode_glops) {
2179 struct gfs2_inode *ip = get_gl2ip(gl);
2180 if (ip && !atomic_read(&ip->i_count))
2181 goto out_schedule;
2182 }
2183 if (queue_empty(gl, &gl->gl_holders) &&
2184 gl->gl_state != LM_ST_UNLOCKED &&
2185 demote_ok(gl))
2186 goto out_schedule;
2187
2188 gfs2_glmutex_unlock(gl);
2189 }
2190
2191 gfs2_glock_put(gl);
2192
2193 return;
2194
2195 out_schedule:
2196 gfs2_glmutex_unlock(gl);
2197 gfs2_glock_schedule_for_reclaim(gl);
2198 gfs2_glock_put(gl);
2199}
2200
2201/**
2202 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
2203 * @sdp: the filesystem
2204 *
2205 */
2206
2207void gfs2_scand_internal(struct gfs2_sbd *sdp)
2208{
2209 unsigned int x;
2210
2211 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2212 examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
2213 cond_resched();
2214 }
2215}
2216
2217/**
2218 * clear_glock - look at a glock and see if we can free it from glock cache
2219 * @gl: the glock to look at
2220 *
2221 */
2222
2223static void clear_glock(struct gfs2_glock *gl)
2224{
2225 struct gfs2_sbd *sdp = gl->gl_sbd;
2226 int released;
2227
2228 spin_lock(&sdp->sd_reclaim_lock);
2229 if (!list_empty(&gl->gl_reclaim)) {
2230 list_del_init(&gl->gl_reclaim);
2231 atomic_dec(&sdp->sd_reclaim_count);
2232 released = gfs2_glock_put(gl);
2233 gfs2_assert(sdp, !released);
2234 }
2235 spin_unlock(&sdp->sd_reclaim_lock);
2236
2237 if (gfs2_glmutex_trylock(gl)) {
2238 if (gl->gl_ops == &gfs2_inode_glops) {
2239 struct gfs2_inode *ip = get_gl2ip(gl);
2240 if (ip && !atomic_read(&ip->i_count))
2241 gfs2_inode_destroy(ip);
2242 }
2243 if (queue_empty(gl, &gl->gl_holders) &&
2244 gl->gl_state != LM_ST_UNLOCKED)
2245 handle_callback(gl, LM_ST_UNLOCKED);
2246
2247 gfs2_glmutex_unlock(gl);
2248 }
2249
2250 gfs2_glock_put(gl);
2251}
2252
2253/**
2254 * gfs2_gl_hash_clear - Empty out the glock hash table
2255 * @sdp: the filesystem
2256 * @wait: wait until it's all gone
2257 *
2258 * Called when unmounting the filesystem, or when inter-node lock manager
2259 * requests DROPLOCKS because it is running out of capacity.
2260 */
2261
2262void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2263{
2264 unsigned long t;
2265 unsigned int x;
2266 int cont;
2267
2268 t = jiffies;
2269
2270 for (;;) {
2271 cont = 0;
2272
2273 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2274 if (examine_bucket(clear_glock, sdp,
2275 &sdp->sd_gl_hash[x]))
2276 cont = 1;
2277
2278 if (!wait || !cont)
2279 break;
2280
2281 if (time_after_eq(jiffies,
2282 t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
2283 fs_warn(sdp, "Unmount seems to be stalled. "
2284 "Dumping lock state...\n");
2285 gfs2_dump_lockstate(sdp);
2286 t = jiffies;
2287 }
2288
2289 /* invalidate_inodes() requires that the sb inodes list
2290 not change, but an async completion callback for an
2291 unlock can occur which does glock_put() which
2292 can call iput() which will change the sb inodes list.
2293 invalidate_inodes_mutex prevents glock_put()'s during
2294 an invalidate_inodes() */
2295
2296 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
2297 invalidate_inodes(sdp->sd_vfs);
2298 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
2299 yield();
2300 }
2301}
2302
2303/*
2304 * Diagnostic routines to help debug distributed deadlock
2305 */
2306
2307/**
2308 * dump_holder - print information about a glock holder
2309 * @str: a string naming the type of holder
2310 * @gh: the glock holder
2311 *
2312 * Returns: 0 on success, -ENOBUFS when we run out of space
2313 */
2314
2315static int dump_holder(char *str, struct gfs2_holder *gh)
2316{
2317 unsigned int x;
2318 int error = -ENOBUFS;
2319
2320 printk(" %s\n", str);
2321 printk(" owner = %ld\n",
2322 (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2323 printk(" gh_state = %u\n", gh->gh_state);
2324 printk(" gh_flags =");
2325 for (x = 0; x < 32; x++)
2326 if (gh->gh_flags & (1 << x))
2327 printk(" %u", x);
2328 printk(" \n");
2329 printk(" error = %d\n", gh->gh_error);
2330 printk(" gh_iflags =");
2331 for (x = 0; x < 32; x++)
2332 if (test_bit(x, &gh->gh_iflags))
2333 printk(" %u", x);
2334 printk(" \n");
2335
2336 error = 0;
2337
2338 return error;
2339}
2340
2341/**
2342 * dump_inode - print information about an inode
2343 * @ip: the inode
2344 *
2345 * Returns: 0 on success, -ENOBUFS when we run out of space
2346 */
2347
2348static int dump_inode(struct gfs2_inode *ip)
2349{
2350 unsigned int x;
2351 int error = -ENOBUFS;
2352
2353 printk(" Inode:\n");
2354 printk(" num = %llu %llu\n",
2355 ip->i_num.no_formal_ino, ip->i_num.no_addr);
2356 printk(" type = %u\n", IF2DT(ip->i_di.di_mode));
2357 printk(" i_count = %d\n", atomic_read(&ip->i_count));
2358 printk(" i_flags =");
2359 for (x = 0; x < 32; x++)
2360 if (test_bit(x, &ip->i_flags))
2361 printk(" %u", x);
2362 printk(" \n");
2363 printk(" vnode = %s\n", (ip->i_vnode) ? "yes" : "no");
2364
2365 error = 0;
2366
2367 return error;
2368}
2369
2370/**
2371 * dump_glock - print information about a glock
2372 * @gl: the glock
2373 * @count: where we are in the buffer
2374 *
2375 * Returns: 0 on success, -ENOBUFS when we run out of space
2376 */
2377
2378static int dump_glock(struct gfs2_glock *gl)
2379{
2380 struct gfs2_holder *gh;
2381 unsigned int x;
2382 int error = -ENOBUFS;
2383
2384 spin_lock(&gl->gl_spin);
2385
2386 printk("Glock (%u, %llu)\n",
2387 gl->gl_name.ln_type,
2388 gl->gl_name.ln_number);
2389 printk(" gl_flags =");
2390 for (x = 0; x < 32; x++)
2391 if (test_bit(x, &gl->gl_flags))
2392 printk(" %u", x);
2393 printk(" \n");
2394 printk(" gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
2395 printk(" gl_state = %u\n", gl->gl_state);
2396 printk(" req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2397 printk(" req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2398 printk(" lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2399 printk(" object = %s\n", (gl->gl_object) ? "yes" : "no");
2400 printk(" le = %s\n",
2401 (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2402 printk(" reclaim = %s\n",
2403 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2404 if (gl->gl_aspace)
2405 printk(" aspace = %lu\n",
2406 gl->gl_aspace->i_mapping->nrpages);
2407 else
2408 printk(" aspace = no\n");
2409 printk(" ail = %d\n", atomic_read(&gl->gl_ail_count));
2410 if (gl->gl_req_gh) {
2411 error = dump_holder("Request", gl->gl_req_gh);
2412 if (error)
2413 goto out;
2414 }
2415 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2416 error = dump_holder("Holder", gh);
2417 if (error)
2418 goto out;
2419 }
2420 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2421 error = dump_holder("Waiter1", gh);
2422 if (error)
2423 goto out;
2424 }
2425 list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2426 error = dump_holder("Waiter2", gh);
2427 if (error)
2428 goto out;
2429 }
2430 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2431 error = dump_holder("Waiter3", gh);
2432 if (error)
2433 goto out;
2434 }
2435 if (gl->gl_ops == &gfs2_inode_glops && get_gl2ip(gl)) {
2436 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2437 list_empty(&gl->gl_holders)) {
2438 error = dump_inode(get_gl2ip(gl));
2439 if (error)
2440 goto out;
2441 } else {
2442 error = -ENOBUFS;
2443 printk(" Inode: busy\n");
2444 }
2445 }
2446
2447 error = 0;
2448
2449 out:
2450 spin_unlock(&gl->gl_spin);
2451
2452 return error;
2453}
2454
2455/**
2456 * gfs2_dump_lockstate - print out the current lockstate
2457 * @sdp: the filesystem
2458 * @ub: the buffer to copy the information into
2459 *
2460 * If @ub is NULL, dump the lockstate to the console.
2461 *
2462 */
2463
2464int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2465{
2466 struct gfs2_gl_hash_bucket *bucket;
2467 struct gfs2_glock *gl;
2468 unsigned int x;
2469 int error = 0;
2470
2471 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2472 bucket = &sdp->sd_gl_hash[x];
2473
2474 read_lock(&bucket->hb_lock);
2475
2476 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
2477 if (test_bit(GLF_PLUG, &gl->gl_flags))
2478 continue;
2479
2480 error = dump_glock(gl);
2481 if (error)
2482 break;
2483 }
2484
2485 read_unlock(&bucket->hb_lock);
2486
2487 if (error)
2488 break;
2489 }
2490
2491
2492 return error;
2493}
2494