/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
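
/* A sketch of the semantics above (illustrative calls, not extra logic):
 * a holder asking for LM_ST_SHARED is satisfied by a glock already held
 * in LM_ST_EXCLUSIVE, since an exclusive hold covers shared access:
 *
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);            => 1
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT);     => 0
 *      relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY);   => 0
 */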

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
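
/* Callers use the result to index the superblock's bucket array, as in
 * gfs2_glock_find() and gfs2_glock_get() below:
 *
 *      struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
 */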

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed, 0 otherwise
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                   struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}
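
/* A sketch of the usual holder lifecycle built on the functions in this
 * file (gfs2_glock_nq_init(), used by gfs2_glock_nq_num() below, combines
 * the first two steps):
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (error) {
 *              gfs2_holder_uninit(&gh);
 *              return error;
 *      }
 *      ... access the object the glock protects ...
 *      gfs2_glock_dq_uninit(&gh);
 */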

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the flags for the memory allocation (e.g. __GFP_NOFAIL)
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
                                    int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else
                complete(&gh.gh_wait);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Wakes up the process waiting on the struct gfs2_holder (if any) and
 * drops the reference on the glock that the top half took out.
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        return error;
}
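
/* With GL_ASYNC set, gfs2_glock_nq() only queues the request; a sketch of
 * the asynchronous pattern using the helpers that follow:
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);     (never returns an error for GL_ASYNC)
 *      ... do other work ...
 *      if (gfs2_glock_poll(&gh))
 *              error = gfs2_glock_wait(&gh);
 */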

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 *
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
        struct greedy *gr = data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - mark a glock "greedy" so demote requests are deferred
 * @gl: the glock
 * @time: the delay, in jiffies, before greedy status is dropped
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

|  | 1412 | /** | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1413 | * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it | 
|  | 1414 | * @gh: the holder structure | 
|  | 1415 | * | 
|  | 1416 | */ | 
|  | 1417 |  | 
|  | 1418 | void gfs2_glock_dq_uninit(struct gfs2_holder *gh) | 
|  | 1419 | { | 
|  | 1420 | gfs2_glock_dq(gh); | 
|  | 1421 | gfs2_holder_uninit(gh); | 
|  | 1422 | } | 
|  | 1423 |  | 
|  | 1424 | /** | 
|  | 1425 | * gfs2_glock_nq_num - acquire a glock based on lock number | 
|  | 1426 | * @sdp: the filesystem | 
|  | 1427 | * @number: the lock number | 
|  | 1428 | * @glops: the glock operations for the type of glock | 
|  | 1429 | * @state: the state to acquire the glock in | 
|  | 1430 | * @flags: modifier flags for the aquisition | 
|  | 1431 | * @gh: the struct gfs2_holder | 
|  | 1432 | * | 
|  | 1433 | * Returns: errno | 
|  | 1434 | */ | 
|  | 1435 |  | 
|  | 1436 | int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number, | 
|  | 1437 | struct gfs2_glock_operations *glops, unsigned int state, | 
|  | 1438 | int flags, struct gfs2_holder *gh) | 
|  | 1439 | { | 
|  | 1440 | struct gfs2_glock *gl; | 
|  | 1441 | int error; | 
|  | 1442 |  | 
|  | 1443 | error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); | 
|  | 1444 | if (!error) { | 
|  | 1445 | error = gfs2_glock_nq_init(gl, state, flags, gh); | 
|  | 1446 | gfs2_glock_put(gl); | 
|  | 1447 | } | 
|  | 1448 |  | 
|  | 1449 | return error; | 
|  | 1450 | } | 
|  | 1451 |  | 
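/*
 * Example (illustrative sketch, not part of the original file): taking
 * and releasing a glock by number.  The choice of gfs2_inode_glops and
 * LM_ST_SHARED here is an assumption made purely for demonstration.
 */
static int example_with_glock(struct gfs2_sbd *sdp, uint64_t number)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_num(sdp, number, &gfs2_inode_glops,
				  LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	/* ... access data protected by the glock ... */

	gfs2_glock_dq_uninit(&gh);
	return 0;
}
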
|  | 1452 | /** | 
|  | 1453 | * glock_compare - Compare two struct gfs2_holder structures for sorting | 
|  | 1454 | * @arg_a: the first structure | 
|  | 1455 | * @arg_b: the second structure | 
|  | 1456 | * | 
|  | 1457 | */ | 
|  | 1458 |  | 
|  | 1459 | static int glock_compare(const void *arg_a, const void *arg_b) | 
|  | 1460 | { | 
|  | 1461 | struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a; | 
|  | 1462 | struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b; | 
|  | 1463 | struct lm_lockname *a = &gh_a->gh_gl->gl_name; | 
|  | 1464 | struct lm_lockname *b = &gh_b->gh_gl->gl_name; | 
|  | 1465 | int ret = 0; | 
|  | 1466 |  | 
|  | 1467 | if (a->ln_number > b->ln_number) | 
|  | 1468 | ret = 1; | 
|  | 1469 | else if (a->ln_number < b->ln_number) | 
|  | 1470 | ret = -1; | 
|  | 1471 | else { | 
|  | 1472 | if (gh_a->gh_state == LM_ST_SHARED && | 
|  | 1473 | gh_b->gh_state == LM_ST_EXCLUSIVE) | 
|  | 1474 | ret = 1; | 
|  | 1475 | else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && | 
|  | 1476 | (gh_b->gh_flags & GL_LOCAL_EXCL)) | 
|  | 1477 | ret = 1; | 
|  | 1478 | } | 
|  | 1479 |  | 
|  | 1480 | return ret; | 
|  | 1481 | } | 
|  | 1482 |  | 
|  | 1483 | /** | 
|  | 1484 | * nq_m_sync - synchronously acquire more than one glock in deadlock-free order | 
|  | 1485 | * @num_gh: the number of structures | 
|  | 1486 | * @ghs: an array of struct gfs2_holder structures | 
|  | 1487 | * | 
|  | 1488 | * Returns: 0 on success (all glocks acquired), | 
|  | 1489 | *          errno on failure (no glocks acquired) | 
|  | 1490 | */ | 
|  | 1491 |  | 
|  | 1492 | static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, | 
|  | 1493 | struct gfs2_holder **p) | 
|  | 1494 | { | 
|  | 1495 | unsigned int x; | 
|  | 1496 | int error = 0; | 
|  | 1497 |  | 
|  | 1498 | for (x = 0; x < num_gh; x++) | 
|  | 1499 | p[x] = &ghs[x]; | 
|  | 1500 |  | 
|  | 1501 | sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); | 
|  | 1502 |  | 
|  | 1503 | for (x = 0; x < num_gh; x++) { | 
|  | 1504 | p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); | 
|  | 1505 |  | 
|  | 1506 | error = gfs2_glock_nq(p[x]); | 
|  | 1507 | if (error) { | 
|  | 1508 | while (x--) | 
|  | 1509 | gfs2_glock_dq(p[x]); | 
|  | 1510 | break; | 
|  | 1511 | } | 
|  | 1512 | } | 
|  | 1513 |  | 
|  | 1514 | return error; | 
|  | 1515 | } | 
|  | 1516 |  | 
|  | 1517 | /** | 
|  | 1518 | * gfs2_glock_nq_m - acquire multiple glocks | 
|  | 1519 | * @num_gh: the number of structures | 
|  | 1520 | * @ghs: an array of struct gfs2_holder structures | 
|  | 1521 | * | 
|  | 1522 | * Figure out how big an impact this function has.  Either: | 
|  | 1523 | * 1) Replace this code with code that calls gfs2_glock_prefetch() | 
|  | 1524 | * 2) Forget async stuff and just call nq_m_sync() | 
|  | 1525 | * 3) Leave it like it is | 
|  | 1526 | * | 
|  | 1527 | * Returns: 0 on success (all glocks acquired), | 
|  | 1528 | *          errno on failure (no glocks acquired) | 
|  | 1529 | */ | 
|  | 1530 |  | 
|  | 1531 | int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) | 
|  | 1532 | { | 
|  | 1533 | int *e; | 
|  | 1534 | unsigned int x; | 
|  | 1535 | int borked = 0, serious = 0; | 
|  | 1536 | int error = 0; | 
|  | 1537 |  | 
|  | 1538 | if (!num_gh) | 
|  | 1539 | return 0; | 
|  | 1540 |  | 
|  | 1541 | if (num_gh == 1) { | 
|  | 1542 | ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); | 
|  | 1543 | return gfs2_glock_nq(ghs); | 
|  | 1544 | } | 
|  | 1545 |  | 
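	/*
	 * Note: "e" does double duty.  It is sized for pointers because the
	 * same allocation is later reused as the scratch holder-pointer
	 * array passed to nq_m_sync(); until then, e[x] holds the
	 * per-holder error codes from glock_wait_internal().
	 */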
|  | 1546 | e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL); | 
|  | 1547 | if (!e) | 
|  | 1548 | return -ENOMEM; | 
|  | 1549 |  | 
|  | 1550 | for (x = 0; x < num_gh; x++) { | 
|  | 1551 | ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC; | 
|  | 1552 | error = gfs2_glock_nq(&ghs[x]); | 
|  | 1553 | if (error) { | 
|  | 1554 | borked = 1; | 
|  | 1555 | serious = error; | 
|  | 1556 | num_gh = x; | 
|  | 1557 | break; | 
|  | 1558 | } | 
|  | 1559 | } | 
|  | 1560 |  | 
|  | 1561 | for (x = 0; x < num_gh; x++) { | 
|  | 1562 | error = e[x] = glock_wait_internal(&ghs[x]); | 
|  | 1563 | if (error) { | 
|  | 1564 | borked = 1; | 
|  | 1565 | if (error != GLR_TRYFAILED && error != GLR_CANCELED) | 
|  | 1566 | serious = error; | 
|  | 1567 | } | 
|  | 1568 | } | 
|  | 1569 |  | 
|  | 1570 | if (!borked) { | 
|  | 1571 | kfree(e); | 
|  | 1572 | return 0; | 
|  | 1573 | } | 
|  | 1574 |  | 
|  | 1575 | for (x = 0; x < num_gh; x++) | 
|  | 1576 | if (!e[x]) | 
|  | 1577 | gfs2_glock_dq(&ghs[x]); | 
|  | 1578 |  | 
|  | 1579 | if (serious) | 
|  | 1580 | error = serious; | 
|  | 1581 | else { | 
|  | 1582 | for (x = 0; x < num_gh; x++) | 
|  | 1583 | gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags, | 
|  | 1584 | &ghs[x]); | 
|  | 1585 | error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e); | 
|  | 1586 | } | 
|  | 1587 |  | 
|  | 1588 | kfree(e); | 
|  | 1589 |  | 
|  | 1590 | return error; | 
|  | 1591 | } | 
|  | 1592 |  | 
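/*
 * Example (illustrative sketch, not part of the original file): acquiring
 * two glocks together.  gfs2_glock_nq_m() handles the try/async pass and
 * the sorted fallback internally, so the caller only initializes the
 * holders and checks a single error code.
 */
static int example_lock_two(struct gfs2_glock *gl_a, struct gfs2_glock *gl_b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

	error = gfs2_glock_nq_m(2, ghs);
	if (!error) {
		/* ... both glocks are held here ... */
		gfs2_glock_dq_m(2, ghs);
	}

	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}
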
|  | 1593 | /** | 
|  | 1594 | * gfs2_glock_dq_m - release multiple glocks | 
|  | 1595 | * @num_gh: the number of structures | 
|  | 1596 | * @ghs: an array of struct gfs2_holder structures | 
|  | 1597 | * | 
|  | 1598 | */ | 
|  | 1599 |  | 
|  | 1600 | void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) | 
|  | 1601 | { | 
|  | 1602 | unsigned int x; | 
|  | 1603 |  | 
|  | 1604 | for (x = 0; x < num_gh; x++) | 
|  | 1605 | gfs2_glock_dq(&ghs[x]); | 
|  | 1606 | } | 
|  | 1607 |  | 
|  | 1608 | /** | 
|  | 1609 | * gfs2_glock_dq_uninit_m - release multiple glocks | 
|  | 1610 | * @num_gh: the number of structures | 
|  | 1611 | * @ghs: an array of struct gfs2_holder structures | 
|  | 1612 | * | 
|  | 1613 | */ | 
|  | 1614 |  | 
|  | 1615 | void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs) | 
|  | 1616 | { | 
|  | 1617 | unsigned int x; | 
|  | 1618 |  | 
|  | 1619 | for (x = 0; x < num_gh; x++) | 
|  | 1620 | gfs2_glock_dq_uninit(&ghs[x]); | 
|  | 1621 | } | 
|  | 1622 |  | 
|  | 1623 | /** | 
|  | 1624 | * gfs2_glock_prefetch_num - prefetch a glock based on lock number | 
|  | 1625 | * @sdp: the filesystem | 
|  | 1626 | * @number: the lock number | 
|  | 1627 | * @glops: the glock operations for the type of glock | 
|  | 1628 | * @state: the state to acquire the glock in | 
|  | 1629 | * @flags: modifier flags for the acquisition | 
|  | 1630 | * | 
|  | 1632 | */ | 
|  | 1633 |  | 
|  | 1634 | void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number, | 
|  | 1635 | struct gfs2_glock_operations *glops, | 
|  | 1636 | unsigned int state, int flags) | 
|  | 1637 | { | 
|  | 1638 | struct gfs2_glock *gl; | 
|  | 1639 | int error; | 
|  | 1640 |  | 
|  | 1641 | if (atomic_read(&sdp->sd_reclaim_count) < | 
|  | 1642 | gfs2_tune_get(sdp, gt_reclaim_limit)) { | 
|  | 1643 | error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); | 
|  | 1644 | if (!error) { | 
|  | 1645 | gfs2_glock_prefetch(gl, state, flags); | 
|  | 1646 | gfs2_glock_put(gl); | 
|  | 1647 | } | 
|  | 1648 | } | 
|  | 1649 | } | 
|  | 1650 |  | 
|  | 1651 | /** | 
|  | 1652 | * gfs2_lvb_hold - attach an LVB to a glock | 
|  | 1653 | * @gl: The glock in question | 
|  | 1654 | * | 
|  | 1655 | */ | 
|  | 1656 |  | 
|  | 1657 | int gfs2_lvb_hold(struct gfs2_glock *gl) | 
|  | 1658 | { | 
|  | 1659 | int error; | 
|  | 1660 |  | 
|  | 1661 | gfs2_glmutex_lock(gl); | 
|  | 1662 |  | 
|  | 1663 | if (!atomic_read(&gl->gl_lvb_count)) { | 
|  | 1664 | error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); | 
|  | 1665 | if (error) { | 
|  | 1666 | gfs2_glmutex_unlock(gl); | 
|  | 1667 | return error; | 
|  | 1668 | } | 
|  | 1669 | gfs2_glock_hold(gl); | 
|  | 1670 | } | 
|  | 1671 | atomic_inc(&gl->gl_lvb_count); | 
|  | 1672 |  | 
|  | 1673 | gfs2_glmutex_unlock(gl); | 
|  | 1674 |  | 
|  | 1675 | return 0; | 
|  | 1676 | } | 
|  | 1677 |  | 
|  | 1678 | /** | 
|  | 1679 | * gfs2_lvb_unhold - detach an LVB from a glock | 
|  | 1680 | * @gl: The glock in question | 
|  | 1681 | * | 
|  | 1682 | */ | 
|  | 1683 |  | 
|  | 1684 | void gfs2_lvb_unhold(struct gfs2_glock *gl) | 
|  | 1685 | { | 
|  | 1686 | gfs2_glock_hold(gl); | 
|  | 1687 | gfs2_glmutex_lock(gl); | 
|  | 1688 |  | 
|  | 1689 | gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); | 
|  | 1690 | if (atomic_dec_and_test(&gl->gl_lvb_count)) { | 
|  | 1691 | gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb); | 
|  | 1692 | gl->gl_lvb = NULL; | 
|  | 1693 | gfs2_glock_put(gl); | 
|  | 1694 | } | 
|  | 1695 |  | 
|  | 1696 | gfs2_glmutex_unlock(gl); | 
|  | 1697 | gfs2_glock_put(gl); | 
|  | 1698 | } | 
|  | 1699 |  | 
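/**
 * gfs2_lvb_sync - sync a glock's LVB contents out to the lock module
 * @gl: The glock in question
 *
 */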
|  | 1700 | void gfs2_lvb_sync(struct gfs2_glock *gl) | 
|  | 1701 | { | 
|  | 1702 | gfs2_glmutex_lock(gl); | 
|  | 1703 |  | 
|  | 1704 | gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count)); | 
|  | 1705 | if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl))) | 
|  | 1706 | gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb); | 
|  | 1707 |  | 
|  | 1708 | gfs2_glmutex_unlock(gl); | 
|  | 1709 | } | 
|  | 1710 |  | 
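/*
 * Example (illustrative sketch, not part of the original file): the LVB
 * life cycle.  Per the assertion in gfs2_lvb_sync() above, the glock must
 * be held exclusively when the LVB is written back; "len" is assumed not
 * to exceed the LVB size.
 */
static int example_publish_lvb(struct gfs2_glock *gl, const void *data,
			       unsigned int len)
{
	int error;

	error = gfs2_lvb_hold(gl);
	if (error)
		return error;

	memcpy(gl->gl_lvb, data, len);	/* caller holds gl in LM_ST_EXCLUSIVE */
	gfs2_lvb_sync(gl);
	gfs2_lvb_unhold(gl);

	return 0;
}
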
|  | 1711 | static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, | 
|  | 1712 | unsigned int state) | 
|  | 1713 | { | 
|  | 1714 | struct gfs2_glock *gl; | 
|  | 1715 |  | 
|  | 1716 | gl = gfs2_glock_find(sdp, name); | 
|  | 1717 | if (!gl) | 
|  | 1718 | return; | 
|  | 1719 |  | 
|  | 1720 | if (gl->gl_ops->go_callback) | 
|  | 1721 | gl->gl_ops->go_callback(gl, state); | 
|  | 1722 | handle_callback(gl, state); | 
|  | 1723 |  | 
|  | 1724 | spin_lock(&gl->gl_spin); | 
|  | 1725 | run_queue(gl); | 
|  | 1726 | spin_unlock(&gl->gl_spin); | 
|  | 1727 |  | 
|  | 1728 | gfs2_glock_put(gl); | 
|  | 1729 | } | 
|  | 1730 |  | 
|  | 1731 | /** | 
|  | 1732 | * gfs2_glock_cb - Callback used by locking module | 
|  | 1733 | * @fsdata: Pointer to the superblock | 
|  | 1734 | * @type: Type of callback | 
|  | 1735 | * @data: Type dependent data pointer | 
|  | 1736 | * | 
|  | 1737 | * Called by the locking module when it wants to tell us something. | 
|  | 1738 | * Either we need to drop a lock, one of our ASYNC requests completed, or | 
|  | 1739 | * a journal from another client needs to be recovered. | 
|  | 1740 | */ | 
|  | 1741 |  | 
|  | 1742 | void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data) | 
|  | 1743 | { | 
|  | 1744 | struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata; | 
|  | 1745 |  | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1746 | switch (type) { | 
|  | 1747 | case LM_CB_NEED_E: | 
| David Teigland | e7f5c01 | 2006-04-27 11:25:45 -0400 | [diff] [blame^] | 1748 | blocking_cb(sdp, data, LM_ST_UNLOCKED); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1749 | return; | 
|  | 1750 |  | 
|  | 1751 | case LM_CB_NEED_D: | 
| David Teigland | e7f5c01 | 2006-04-27 11:25:45 -0400 | [diff] [blame^] | 1752 | blocking_cb(sdp, data, LM_ST_DEFERRED); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1753 | return; | 
|  | 1754 |  | 
|  | 1755 | case LM_CB_NEED_S: | 
| David Teigland | e7f5c01 | 2006-04-27 11:25:45 -0400 | [diff] [blame^] | 1756 | blocking_cb(sdp, data, LM_ST_SHARED); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1757 | return; | 
|  | 1758 |  | 
|  | 1759 | case LM_CB_ASYNC: { | 
| David Teigland | e7f5c01 | 2006-04-27 11:25:45 -0400 | [diff] [blame^] | 1760 | struct lm_async_cb *async = data; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1761 | struct gfs2_glock *gl; | 
|  | 1762 |  | 
|  | 1763 | gl = gfs2_glock_find(sdp, &async->lc_name); | 
|  | 1764 | if (gfs2_assert_warn(sdp, gl)) | 
|  | 1765 | return; | 
|  | 1766 | if (!gfs2_assert_warn(sdp, gl->gl_req_bh)) | 
|  | 1767 | gl->gl_req_bh(gl, async->lc_ret); | 
|  | 1768 | gfs2_glock_put(gl); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1769 | return; | 
|  | 1770 | } | 
|  | 1771 |  | 
|  | 1772 | case LM_CB_NEED_RECOVERY: | 
|  | 1773 | gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data); | 
|  | 1774 | if (sdp->sd_recoverd_process) | 
|  | 1775 | wake_up_process(sdp->sd_recoverd_process); | 
|  | 1776 | return; | 
|  | 1777 |  | 
|  | 1778 | case LM_CB_DROPLOCKS: | 
|  | 1779 | gfs2_gl_hash_clear(sdp, NO_WAIT); | 
|  | 1780 | gfs2_quota_scan(sdp); | 
|  | 1781 | return; | 
|  | 1782 |  | 
|  | 1783 | default: | 
|  | 1784 | gfs2_assert_warn(sdp, 0); | 
|  | 1785 | return; | 
|  | 1786 | } | 
|  | 1787 | } | 
|  | 1788 |  | 
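/*
 * Example (illustrative sketch, not part of the original file): how a
 * lock module might deliver an async completion through gfs2_glock_cb().
 * The lm_async_cb fields follow the LM_CB_ASYNC case above; the caller
 * and scenario are assumptions for demonstration.
 */
static void example_deliver_async(lm_fsdata_t *fsdata,
				  struct lm_lockname *name, int ret)
{
	struct lm_async_cb async;

	async.lc_name = *name;
	async.lc_ret = ret;
	gfs2_glock_cb(fsdata, LM_CB_ASYNC, &async);
}
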
|  | 1789 | /** | 
|  | 1790 | * gfs2_try_toss_inode - try to remove a particular inode struct from cache | 
|  | 1791 | * @sdp: the filesystem | 
|  | 1792 | * @inum: the inode number | 
|  | 1793 | * | 
|  | 1794 | */ | 
|  | 1795 |  | 
|  | 1796 | void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum) | 
|  | 1797 | { | 
|  | 1798 | struct gfs2_glock *gl; | 
|  | 1799 | struct gfs2_inode *ip; | 
|  | 1800 | int error; | 
|  | 1801 |  | 
|  | 1802 | error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops, | 
|  | 1803 | NO_CREATE, &gl); | 
|  | 1804 | if (error || !gl) | 
|  | 1805 | return; | 
|  | 1806 |  | 
|  | 1807 | if (!gfs2_glmutex_trylock(gl)) | 
|  | 1808 | goto out; | 
|  | 1809 |  | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 1810 | ip = gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1811 | if (!ip) | 
|  | 1812 | goto out_unlock; | 
|  | 1813 |  | 
|  | 1814 | if (atomic_read(&ip->i_count)) | 
|  | 1815 | goto out_unlock; | 
|  | 1816 |  | 
|  | 1817 | gfs2_inode_destroy(ip); | 
|  | 1818 |  | 
|  | 1819 | out_unlock: | 
|  | 1820 | gfs2_glmutex_unlock(gl); | 
|  | 1821 |  | 
|  | 1822 | out: | 
|  | 1823 | gfs2_glock_put(gl); | 
|  | 1824 | } | 
|  | 1825 |  | 
|  | 1826 | /** | 
|  | 1827 | * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an | 
|  | 1828 | *                          iopen glock from memory | 
|  | 1829 | * @io_gl: the iopen glock | 
|  | 1830 | * @state: the state into which the glock should be put | 
|  | 1831 | * | 
|  | 1832 | */ | 
|  | 1833 |  | 
|  | 1834 | void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state) | 
|  | 1835 | { | 
|  | 1836 | struct gfs2_glock *i_gl; | 
|  | 1837 |  | 
|  | 1838 | if (state != LM_ST_UNLOCKED) | 
|  | 1839 | return; | 
|  | 1840 |  | 
|  | 1841 | spin_lock(&io_gl->gl_spin); | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 1842 | i_gl = io_gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1843 | if (i_gl) { | 
|  | 1844 | gfs2_glock_hold(i_gl); | 
|  | 1845 | spin_unlock(&io_gl->gl_spin); | 
|  | 1846 | } else { | 
|  | 1847 | spin_unlock(&io_gl->gl_spin); | 
|  | 1848 | return; | 
|  | 1849 | } | 
|  | 1850 |  | 
|  | 1851 | if (gfs2_glmutex_trylock(i_gl)) { | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 1852 | struct gfs2_inode *ip = i_gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1853 | if (ip) { | 
|  | 1854 | gfs2_try_toss_vnode(ip); | 
|  | 1855 | gfs2_glmutex_unlock(i_gl); | 
|  | 1856 | gfs2_glock_schedule_for_reclaim(i_gl); | 
|  | 1857 | goto out; | 
|  | 1858 | } | 
|  | 1859 | gfs2_glmutex_unlock(i_gl); | 
|  | 1860 | } | 
|  | 1861 |  | 
|  | 1862 | out: | 
|  | 1863 | gfs2_glock_put(i_gl); | 
|  | 1864 | } | 
|  | 1865 |  | 
|  | 1866 | /** | 
|  | 1867 | * demote_ok - Check to see if it's ok to unlock a glock | 
|  | 1868 | * @gl: the glock | 
|  | 1869 | * | 
|  | 1870 | * Returns: 1 if it's ok | 
|  | 1871 | */ | 
|  | 1872 |  | 
|  | 1873 | static int demote_ok(struct gfs2_glock *gl) | 
|  | 1874 | { | 
|  | 1875 | struct gfs2_sbd *sdp = gl->gl_sbd; | 
|  | 1876 | struct gfs2_glock_operations *glops = gl->gl_ops; | 
|  | 1877 | int demote = 1; | 
|  | 1878 |  | 
|  | 1879 | if (test_bit(GLF_STICKY, &gl->gl_flags)) | 
|  | 1880 | demote = 0; | 
|  | 1881 | else if (test_bit(GLF_PREFETCH, &gl->gl_flags)) | 
|  | 1882 | demote = time_after_eq(jiffies, | 
|  | 1883 | gl->gl_stamp + | 
|  | 1884 | gfs2_tune_get(sdp, gt_prefetch_secs) * HZ); | 
|  | 1885 | else if (glops->go_demote_ok) | 
|  | 1886 | demote = glops->go_demote_ok(gl); | 
|  | 1887 |  | 
|  | 1888 | return demote; | 
|  | 1889 | } | 
|  | 1890 |  | 
|  | 1891 | /** | 
|  | 1892 | * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list | 
|  | 1893 | * @gl: the glock | 
|  | 1894 | * | 
|  | 1895 | */ | 
|  | 1896 |  | 
|  | 1897 | void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) | 
|  | 1898 | { | 
|  | 1899 | struct gfs2_sbd *sdp = gl->gl_sbd; | 
|  | 1900 |  | 
|  | 1901 | spin_lock(&sdp->sd_reclaim_lock); | 
|  | 1902 | if (list_empty(&gl->gl_reclaim)) { | 
|  | 1903 | gfs2_glock_hold(gl); | 
|  | 1904 | list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); | 
|  | 1905 | atomic_inc(&sdp->sd_reclaim_count); | 
|  | 1906 | } | 
|  | 1907 | spin_unlock(&sdp->sd_reclaim_lock); | 
|  | 1908 |  | 
|  | 1909 | wake_up(&sdp->sd_reclaim_wq); | 
|  | 1910 | } | 
|  | 1911 |  | 
|  | 1912 | /** | 
|  | 1913 | * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list | 
|  | 1914 | * @sdp: the filesystem | 
|  | 1915 | * | 
|  | 1916 | * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a | 
|  | 1917 | * different glock and we notice that there are a lot of glocks in the | 
|  | 1918 | * reclaim list. | 
|  | 1919 | * | 
|  | 1920 | */ | 
|  | 1921 |  | 
|  | 1922 | void gfs2_reclaim_glock(struct gfs2_sbd *sdp) | 
|  | 1923 | { | 
|  | 1924 | struct gfs2_glock *gl; | 
|  | 1925 |  | 
|  | 1926 | spin_lock(&sdp->sd_reclaim_lock); | 
|  | 1927 | if (list_empty(&sdp->sd_reclaim_list)) { | 
|  | 1928 | spin_unlock(&sdp->sd_reclaim_lock); | 
|  | 1929 | return; | 
|  | 1930 | } | 
|  | 1931 | gl = list_entry(sdp->sd_reclaim_list.next, | 
|  | 1932 | struct gfs2_glock, gl_reclaim); | 
|  | 1933 | list_del_init(&gl->gl_reclaim); | 
|  | 1934 | spin_unlock(&sdp->sd_reclaim_lock); | 
|  | 1935 |  | 
|  | 1936 | atomic_dec(&sdp->sd_reclaim_count); | 
|  | 1937 | atomic_inc(&sdp->sd_reclaimed); | 
|  | 1938 |  | 
|  | 1939 | if (gfs2_glmutex_trylock(gl)) { | 
|  | 1940 | if (gl->gl_ops == &gfs2_inode_glops) { | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 1941 | struct gfs2_inode *ip = gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 1942 | if (ip && !atomic_read(&ip->i_count)) | 
|  | 1943 | gfs2_inode_destroy(ip); | 
|  | 1944 | } | 
|  | 1945 | if (queue_empty(gl, &gl->gl_holders) && | 
|  | 1946 | gl->gl_state != LM_ST_UNLOCKED && | 
|  | 1947 | demote_ok(gl)) | 
|  | 1948 | handle_callback(gl, LM_ST_UNLOCKED); | 
|  | 1949 | gfs2_glmutex_unlock(gl); | 
|  | 1950 | } | 
|  | 1951 |  | 
|  | 1952 | gfs2_glock_put(gl); | 
|  | 1953 | } | 
|  | 1954 |  | 
|  | 1955 | /** | 
|  | 1956 | * examine_bucket - Call a function for each glock in a hash bucket | 
|  | 1957 | * @examiner: the function | 
|  | 1958 | * @sdp: the filesystem | 
|  | 1959 | * @bucket: the bucket | 
|  | 1960 | * | 
|  | 1961 | * Returns: 1 if the bucket has entries | 
|  | 1962 | */ | 
|  | 1963 |  | 
|  | 1964 | static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp, | 
|  | 1965 | struct gfs2_gl_hash_bucket *bucket) | 
|  | 1966 | { | 
|  | 1967 | struct glock_plug plug; | 
|  | 1968 | struct list_head *tmp; | 
|  | 1969 | struct gfs2_glock *gl; | 
|  | 1970 | int entries; | 
|  | 1971 |  | 
|  | 1972 | /* Add "plug" to end of bucket list, work back up list from there */ | 
|  | 1973 | memset(&plug.gl_flags, 0, sizeof(unsigned long)); | 
|  | 1974 | set_bit(GLF_PLUG, &plug.gl_flags); | 
|  | 1975 |  | 
|  | 1976 | write_lock(&bucket->hb_lock); | 
|  | 1977 | list_add(&plug.gl_list, &bucket->hb_list); | 
|  | 1978 | write_unlock(&bucket->hb_lock); | 
|  | 1979 |  | 
|  | 1980 | for (;;) { | 
|  | 1981 | write_lock(&bucket->hb_lock); | 
|  | 1982 |  | 
|  | 1983 | for (;;) { | 
|  | 1984 | tmp = plug.gl_list.next; | 
|  | 1985 |  | 
|  | 1986 | if (tmp == &bucket->hb_list) { | 
|  | 1987 | list_del(&plug.gl_list); | 
|  | 1988 | entries = !list_empty(&bucket->hb_list); | 
|  | 1989 | write_unlock(&bucket->hb_lock); | 
|  | 1990 | return entries; | 
|  | 1991 | } | 
|  | 1992 | gl = list_entry(tmp, struct gfs2_glock, gl_list); | 
|  | 1993 |  | 
|  | 1994 | /* Move plug up list */ | 
|  | 1995 | list_move(&plug.gl_list, &gl->gl_list); | 
|  | 1996 |  | 
|  | 1997 | if (test_bit(GLF_PLUG, &gl->gl_flags)) | 
|  | 1998 | continue; | 
|  | 1999 |  | 
|  | 2000 | /* examiner() must glock_put() */ | 
|  | 2001 | gfs2_glock_hold(gl); | 
|  | 2002 |  | 
|  | 2003 | break; | 
|  | 2004 | } | 
|  | 2005 |  | 
|  | 2006 | write_unlock(&bucket->hb_lock); | 
|  | 2007 |  | 
|  | 2008 | examiner(gl); | 
|  | 2009 | } | 
|  | 2010 | } | 
|  | 2011 |  | 
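/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * examiner.  examine_bucket() hands each examiner a held glock, and the
 * examiner must drop that reference itself ("examiner() must glock_put()").
 */
static void example_print_glock(struct gfs2_glock *gl)
{
	printk(KERN_INFO "glock (%u, %llu)\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number);
	gfs2_glock_put(gl);
}
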
|  | 2012 | /** | 
|  | 2013 | * scan_glock - look at a glock and see if we can reclaim it | 
|  | 2014 | * @gl: the glock to look at | 
|  | 2015 | * | 
|  | 2016 | */ | 
|  | 2017 |  | 
|  | 2018 | static void scan_glock(struct gfs2_glock *gl) | 
|  | 2019 | { | 
|  | 2020 | if (gfs2_glmutex_trylock(gl)) { | 
|  | 2021 | if (gl->gl_ops == &gfs2_inode_glops) { | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 2022 | struct gfs2_inode *ip = gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2023 | if (ip && !atomic_read(&ip->i_count)) | 
|  | 2024 | goto out_schedule; | 
|  | 2025 | } | 
|  | 2026 | if (queue_empty(gl, &gl->gl_holders) && | 
|  | 2027 | gl->gl_state != LM_ST_UNLOCKED && | 
|  | 2028 | demote_ok(gl)) | 
|  | 2029 | goto out_schedule; | 
|  | 2030 |  | 
|  | 2031 | gfs2_glmutex_unlock(gl); | 
|  | 2032 | } | 
|  | 2033 |  | 
|  | 2034 | gfs2_glock_put(gl); | 
|  | 2035 |  | 
|  | 2036 | return; | 
|  | 2037 |  | 
|  | 2038 | out_schedule: | 
|  | 2039 | gfs2_glmutex_unlock(gl); | 
|  | 2040 | gfs2_glock_schedule_for_reclaim(gl); | 
|  | 2041 | gfs2_glock_put(gl); | 
|  | 2042 | } | 
|  | 2043 |  | 
|  | 2044 | /** | 
|  | 2045 | * gfs2_scand_internal - Look for glocks and inodes to toss from memory | 
|  | 2046 | * @sdp: the filesystem | 
|  | 2047 | * | 
|  | 2048 | */ | 
|  | 2049 |  | 
|  | 2050 | void gfs2_scand_internal(struct gfs2_sbd *sdp) | 
|  | 2051 | { | 
|  | 2052 | unsigned int x; | 
|  | 2053 |  | 
|  | 2054 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { | 
|  | 2055 | examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]); | 
|  | 2056 | cond_resched(); | 
|  | 2057 | } | 
|  | 2058 | } | 
|  | 2059 |  | 
|  | 2060 | /** | 
|  | 2061 | * clear_glock - look at a glock and see if we can free it from the glock cache | 
|  | 2062 | * @gl: the glock to look at | 
|  | 2063 | * | 
|  | 2064 | */ | 
|  | 2065 |  | 
|  | 2066 | static void clear_glock(struct gfs2_glock *gl) | 
|  | 2067 | { | 
|  | 2068 | struct gfs2_sbd *sdp = gl->gl_sbd; | 
|  | 2069 | int released; | 
|  | 2070 |  | 
|  | 2071 | spin_lock(&sdp->sd_reclaim_lock); | 
|  | 2072 | if (!list_empty(&gl->gl_reclaim)) { | 
|  | 2073 | list_del_init(&gl->gl_reclaim); | 
|  | 2074 | atomic_dec(&sdp->sd_reclaim_count); | 
| Steven Whitehouse | 190562b | 2006-04-20 16:57:23 -0400 | [diff] [blame] | 2075 | spin_unlock(&sdp->sd_reclaim_lock); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2076 | released = gfs2_glock_put(gl); | 
|  | 2077 | gfs2_assert(sdp, !released); | 
| Steven Whitehouse | 190562b | 2006-04-20 16:57:23 -0400 | [diff] [blame] | 2078 | } else { | 
|  | 2079 | spin_unlock(&sdp->sd_reclaim_lock); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2080 | } | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2081 |  | 
|  | 2082 | if (gfs2_glmutex_trylock(gl)) { | 
|  | 2083 | if (gl->gl_ops == &gfs2_inode_glops) { | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 2084 | struct gfs2_inode *ip = gl->gl_object; | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2085 | if (ip && !atomic_read(&ip->i_count)) | 
|  | 2086 | gfs2_inode_destroy(ip); | 
|  | 2087 | } | 
|  | 2088 | if (queue_empty(gl, &gl->gl_holders) && | 
|  | 2089 | gl->gl_state != LM_ST_UNLOCKED) | 
|  | 2090 | handle_callback(gl, LM_ST_UNLOCKED); | 
|  | 2091 |  | 
|  | 2092 | gfs2_glmutex_unlock(gl); | 
|  | 2093 | } | 
|  | 2094 |  | 
|  | 2095 | gfs2_glock_put(gl); | 
|  | 2096 | } | 
|  | 2097 |  | 
|  | 2098 | /** | 
|  | 2099 | * gfs2_gl_hash_clear - Empty out the glock hash table | 
|  | 2100 | * @sdp: the filesystem | 
|  | 2101 | * @wait: wait until it's all gone | 
|  | 2102 | * | 
|  | 2103 | * Called when unmounting the filesystem, or when inter-node lock manager | 
|  | 2104 | * requests DROPLOCKS because it is running out of capacity. | 
|  | 2105 | */ | 
|  | 2106 |  | 
|  | 2107 | void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) | 
|  | 2108 | { | 
|  | 2109 | unsigned long t; | 
|  | 2110 | unsigned int x; | 
|  | 2111 | int cont; | 
|  | 2112 |  | 
|  | 2113 | t = jiffies; | 
|  | 2114 |  | 
|  | 2115 | for (;;) { | 
|  | 2116 | cont = 0; | 
|  | 2117 |  | 
|  | 2118 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) | 
|  | 2119 | if (examine_bucket(clear_glock, sdp, | 
|  | 2120 | &sdp->sd_gl_hash[x])) | 
|  | 2121 | cont = 1; | 
|  | 2122 |  | 
|  | 2123 | if (!wait || !cont) | 
|  | 2124 | break; | 
|  | 2125 |  | 
|  | 2126 | if (time_after_eq(jiffies, | 
|  | 2127 | t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) { | 
|  | 2128 | fs_warn(sdp, "Unmount seems to be stalled. " | 
|  | 2129 | "Dumping lock state...\n"); | 
|  | 2130 | gfs2_dump_lockstate(sdp); | 
|  | 2131 | t = jiffies; | 
|  | 2132 | } | 
|  | 2133 |  | 
|  | 2134 | /* invalidate_inodes() requires that the sb inodes list | 
|  | 2135 | not change, but an async completion callback for an | 
|  | 2136 | unlock can occur which does glock_put() which | 
|  | 2137 | can call iput() which will change the sb inodes list. | 
|  | 2138 | invalidate_inodes_mutex prevents glock_put()'s during | 
|  | 2139 | an invalidate_inodes() */ | 
|  | 2140 |  | 
| Steven Whitehouse | f55ab26 | 2006-02-21 12:51:39 +0000 | [diff] [blame] | 2141 | mutex_lock(&sdp->sd_invalidate_inodes_mutex); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2142 | invalidate_inodes(sdp->sd_vfs); | 
| Steven Whitehouse | f55ab26 | 2006-02-21 12:51:39 +0000 | [diff] [blame] | 2143 | mutex_unlock(&sdp->sd_invalidate_inodes_mutex); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2144 | yield(); | 
|  | 2145 | } | 
|  | 2146 | } | 
|  | 2147 |  | 
|  | 2148 | /* | 
|  | 2149 | *  Diagnostic routines to help debug distributed deadlock | 
|  | 2150 | */ | 
|  | 2151 |  | 
|  | 2152 | /** | 
|  | 2153 | * dump_holder - print information about a glock holder | 
|  | 2154 | * @str: a string naming the type of holder | 
|  | 2155 | * @gh: the glock holder | 
|  | 2156 | * | 
|  | 2157 | * Returns: 0 on success, -ENOBUFS when we run out of space | 
|  | 2158 | */ | 
|  | 2159 |  | 
|  | 2160 | static int dump_holder(char *str, struct gfs2_holder *gh) | 
|  | 2161 | { | 
|  | 2162 | unsigned int x; | 
|  | 2163 | int error = -ENOBUFS; | 
|  | 2164 |  | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2165 | printk(KERN_INFO "  %s\n", str); | 
|  | 2166 | printk(KERN_INFO "    owner = %ld\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2167 | (gh->gh_owner) ? (long)gh->gh_owner->pid : -1); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2168 | printk(KERN_INFO "    gh_state = %u\n", gh->gh_state); | 
|  | 2169 | printk(KERN_INFO "    gh_flags ="); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2170 | for (x = 0; x < 32; x++) | 
|  | 2171 | if (gh->gh_flags & (1 << x)) | 
|  | 2172 | printk(" %u", x); | 
|  | 2173 | printk(" \n"); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2174 | printk(KERN_INFO "    error = %d\n", gh->gh_error); | 
|  | 2175 | printk(KERN_INFO "    gh_iflags ="); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2176 | for (x = 0; x < 32; x++) | 
|  | 2177 | if (test_bit(x, &gh->gh_iflags)) | 
|  | 2178 | printk(" %u", x); | 
|  | 2179 | printk(" \n"); | 
| Steven Whitehouse | d0dc80d | 2006-03-29 14:36:49 -0500 | [diff] [blame] | 2180 | print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2181 |  | 
|  | 2182 | error = 0; | 
|  | 2183 |  | 
|  | 2184 | return error; | 
|  | 2185 | } | 
|  | 2186 |  | 
|  | 2187 | /** | 
|  | 2188 | * dump_inode - print information about an inode | 
|  | 2189 | * @ip: the inode | 
|  | 2190 | * | 
|  | 2191 | * Returns: 0 on success, -ENOBUFS when we run out of space | 
|  | 2192 | */ | 
|  | 2193 |  | 
|  | 2194 | static int dump_inode(struct gfs2_inode *ip) | 
|  | 2195 | { | 
|  | 2196 | unsigned int x; | 
|  | 2197 | int error = -ENOBUFS; | 
|  | 2198 |  | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2199 | printk(KERN_INFO "  Inode:\n"); | 
|  | 2200 | printk(KERN_INFO "    num = %llu %llu\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2201 | ip->i_num.no_formal_ino, ip->i_num.no_addr); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2202 | printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode)); | 
|  | 2203 | printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count)); | 
|  | 2204 | printk(KERN_INFO "    i_flags ="); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2205 | for (x = 0; x < 32; x++) | 
|  | 2206 | if (test_bit(x, &ip->i_flags)) | 
|  | 2207 | printk(" %u", x); | 
|  | 2208 | printk(" \n"); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2209 | printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no"); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2210 |  | 
|  | 2211 | error = 0; | 
|  | 2212 |  | 
|  | 2213 | return error; | 
|  | 2214 | } | 
|  | 2215 |  | 
|  | 2216 | /** | 
|  | 2217 | * dump_glock - print information about a glock | 
|  | 2218 | * @gl: the glock | 
|  | 2220 | * | 
|  | 2221 | * Returns: 0 on success, -ENOBUFS when we run out of space | 
|  | 2222 | */ | 
|  | 2223 |  | 
|  | 2224 | static int dump_glock(struct gfs2_glock *gl) | 
|  | 2225 | { | 
|  | 2226 | struct gfs2_holder *gh; | 
|  | 2227 | unsigned int x; | 
|  | 2228 | int error = -ENOBUFS; | 
|  | 2229 |  | 
|  | 2230 | spin_lock(&gl->gl_spin); | 
|  | 2231 |  | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2232 | printk(KERN_INFO "Glock (%u, %llu)\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2233 | gl->gl_name.ln_type, | 
|  | 2234 | gl->gl_name.ln_number); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2235 | printk(KERN_INFO "  gl_flags ="); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2236 | for (x = 0; x < 32; x++) | 
|  | 2237 | if (test_bit(x, &gl->gl_flags)) | 
|  | 2238 | printk(" %u", x); | 
|  | 2239 | printk(" \n"); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2240 | printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount)); | 
|  | 2241 | printk(KERN_INFO "  gl_state = %u\n", gl->gl_state); | 
|  | 2242 | printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); | 
|  | 2243 | printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); | 
|  | 2244 | printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); | 
|  | 2245 | printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no"); | 
|  | 2246 | printk(KERN_INFO "  le = %s\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2247 | (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2248 | printk(KERN_INFO "  reclaim = %s\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2249 | (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); | 
|  | 2250 | if (gl->gl_aspace) | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2251 | printk(KERN_INFO "  aspace = %lu\n", | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2252 | gl->gl_aspace->i_mapping->nrpages); | 
|  | 2253 | else | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2254 | printk(KERN_INFO "  aspace = no\n"); | 
|  | 2255 | printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count)); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2256 | if (gl->gl_req_gh) { | 
|  | 2257 | error = dump_holder("Request", gl->gl_req_gh); | 
|  | 2258 | if (error) | 
|  | 2259 | goto out; | 
|  | 2260 | } | 
|  | 2261 | list_for_each_entry(gh, &gl->gl_holders, gh_list) { | 
|  | 2262 | error = dump_holder("Holder", gh); | 
|  | 2263 | if (error) | 
|  | 2264 | goto out; | 
|  | 2265 | } | 
|  | 2266 | list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { | 
|  | 2267 | error = dump_holder("Waiter1", gh); | 
|  | 2268 | if (error) | 
|  | 2269 | goto out; | 
|  | 2270 | } | 
|  | 2271 | list_for_each_entry(gh, &gl->gl_waiters2, gh_list) { | 
|  | 2272 | error = dump_holder("Waiter2", gh); | 
|  | 2273 | if (error) | 
|  | 2274 | goto out; | 
|  | 2275 | } | 
|  | 2276 | list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { | 
|  | 2277 | error = dump_holder("Waiter3", gh); | 
|  | 2278 | if (error) | 
|  | 2279 | goto out; | 
|  | 2280 | } | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 2281 | if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2282 | if (!test_bit(GLF_LOCK, &gl->gl_flags) && | 
|  | 2283 | list_empty(&gl->gl_holders)) { | 
| Steven Whitehouse | 5c676f6 | 2006-02-27 17:23:27 -0500 | [diff] [blame] | 2284 | error = dump_inode(gl->gl_object); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2285 | if (error) | 
|  | 2286 | goto out; | 
|  | 2287 | } else { | 
|  | 2288 | error = -ENOBUFS; | 
| Steven Whitehouse | d92a8d4 | 2006-02-27 10:57:14 -0500 | [diff] [blame] | 2289 | printk(KERN_INFO "  Inode: busy\n"); | 
| David Teigland | b3b94fa | 2006-01-16 16:50:04 +0000 | [diff] [blame] | 2290 | } | 
|  | 2291 | } | 
|  | 2292 |  | 
|  | 2293 | error = 0; | 
|  | 2294 |  | 
|  | 2295 | out: | 
|  | 2296 | spin_unlock(&gl->gl_spin); | 
|  | 2297 |  | 
|  | 2298 | return error; | 
|  | 2299 | } | 
|  | 2300 |  | 
|  | 2301 | /** | 
|  | 2302 | * gfs2_dump_lockstate - print out the current lockstate | 
|  | 2303 | * @sdp: the filesystem | 
|  | 2304 | * | 
|  | 2305 | * Dumps the current lock state to the console. | 
|  | 2306 | * | 
|  | 2307 | * Returns: 0 on success, -ENOBUFS when we run out of space | 
|  | 2308 | */ | 
|  | 2309 |  | 
|  | 2310 | int gfs2_dump_lockstate(struct gfs2_sbd *sdp) | 
|  | 2311 | { | 
|  | 2312 | struct gfs2_gl_hash_bucket *bucket; | 
|  | 2313 | struct gfs2_glock *gl; | 
|  | 2314 | unsigned int x; | 
|  | 2315 | int error = 0; | 
|  | 2316 |  | 
|  | 2317 | for (x = 0; x < GFS2_GL_HASH_SIZE; x++) { | 
|  | 2318 | bucket = &sdp->sd_gl_hash[x]; | 
|  | 2319 |  | 
|  | 2320 | read_lock(&bucket->hb_lock); | 
|  | 2321 |  | 
|  | 2322 | list_for_each_entry(gl, &bucket->hb_list, gl_list) { | 
|  | 2323 | if (test_bit(GLF_PLUG, &gl->gl_flags)) | 
|  | 2324 | continue; | 
|  | 2325 |  | 
|  | 2326 | error = dump_glock(gl); | 
|  | 2327 | if (error) | 
|  | 2328 | break; | 
|  | 2329 | } | 
|  | 2330 |  | 
|  | 2331 | read_unlock(&bucket->hb_lock); | 
|  | 2332 |  | 
|  | 2333 | if (error) | 
|  | 2334 | break; | 
|  | 2335 | } | 
|  | 2336 |  | 
|  | 2338 | return error; | 
|  | 2339 | } | 
|  | 2340 |  |