blob: 9c046dbf472954c3e9a5532b1b9aecb3d8f82c61 [file] [log] [blame]
David Teiglandb3b94fa2006-01-16 16:50:04 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Steven Whitehouse3a8a9a12006-05-18 15:09:15 -04003 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00004 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teiglandb3b94fa2006-01-16 16:50:04 +00008 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050015#include <linux/gfs2_ondisk.h>
Fabio Massimo Di Nitto7d308592006-09-19 07:56:29 +020016#include <linux/lm_interface.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000017
18#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050019#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000020#include "bmap.h"
21#include "glock.h"
22#include "glops.h"
23#include "inode.h"
24#include "log.h"
25#include "meta_io.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000026#include "recovery.h"
27#include "rgrp.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050028#include "util.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000029
Steven Whitehouseba7f7292006-07-26 11:27:10 -040030
31/**
32 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
33 * @gl: the glock
34 *
35 */
36
37static void gfs2_pte_inval(struct gfs2_glock *gl)
38{
39 struct gfs2_inode *ip;
40 struct inode *inode;
41
42 ip = gl->gl_object;
43 inode = &ip->i_inode;
44 if (!ip || !S_ISREG(ip->i_di.di_mode))
45 return;
46
47 if (!test_bit(GIF_PAGED, &ip->i_flags))
48 return;
49
50 unmap_shared_mapping_range(inode->i_mapping, 0, 0);
51
52 if (test_bit(GIF_SW_PAGED, &ip->i_flags))
53 set_bit(GLF_DIRTY, &gl->gl_flags);
54
55 clear_bit(GIF_SW_PAGED, &ip->i_flags);
56}
57
58/**
59 * gfs2_page_inval - Invalidate all pages associated with a glock
60 * @gl: the glock
61 *
62 */
63
64static void gfs2_page_inval(struct gfs2_glock *gl)
65{
66 struct gfs2_inode *ip;
67 struct inode *inode;
68
69 ip = gl->gl_object;
70 inode = &ip->i_inode;
71 if (!ip || !S_ISREG(ip->i_di.di_mode))
72 return;
73
74 truncate_inode_pages(inode->i_mapping, 0);
75 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
76 clear_bit(GIF_PAGED, &ip->i_flags);
77}
78
79/**
80 * gfs2_page_sync - Sync the data pages (not metadata) associated with a glock
81 * @gl: the glock
82 * @flags: DIO_START | DIO_WAIT
83 *
84 * Syncs data (not metadata) for a regular file.
85 * No-op for all other types.
86 */
87
88static void gfs2_page_sync(struct gfs2_glock *gl, int flags)
89{
90 struct gfs2_inode *ip;
91 struct inode *inode;
92 struct address_space *mapping;
93 int error = 0;
94
95 ip = gl->gl_object;
96 inode = &ip->i_inode;
97 if (!ip || !S_ISREG(ip->i_di.di_mode))
98 return;
99
100 mapping = inode->i_mapping;
101
102 if (flags & DIO_START)
103 filemap_fdatawrite(mapping);
104 if (!error && (flags & DIO_WAIT))
105 error = filemap_fdatawait(mapping);
106
107 /* Put back any errors cleared by filemap_fdatawait()
108 so they can be caught by someone who can pass them
109 up to user space. */
110
111 if (error == -ENOSPC)
112 set_bit(AS_ENOSPC, &mapping->flags);
113 else if (error)
114 set_bit(AS_EIO, &mapping->flags);
115
116}
117
David Teiglandb3b94fa2006-01-16 16:50:04 +0000118/**
119 * meta_go_sync - sync out the metadata for this glock
120 * @gl: the glock
121 * @flags: DIO_*
122 *
123 * Called when demoting or unlocking an EX glock. We must flush
124 * to disk all dirty buffers/pages relating to this glock, and must not
125 * not return to caller to demote/unlock the glock until I/O is complete.
126 */
127
128static void meta_go_sync(struct gfs2_glock *gl, int flags)
129{
130 if (!(flags & DIO_METADATA))
131 return;
132
133 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
Steven Whitehouseb09e5932006-04-07 11:17:32 -0400134 gfs2_log_flush(gl->gl_sbd, gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000135 gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
136 if (flags & DIO_RELEASE)
137 gfs2_ail_empty_gl(gl);
138 }
139
David Teiglandb3b94fa2006-01-16 16:50:04 +0000140}
141
142/**
143 * meta_go_inval - invalidate the metadata for this glock
144 * @gl: the glock
145 * @flags:
146 *
147 */
148
149static void meta_go_inval(struct gfs2_glock *gl, int flags)
150{
151 if (!(flags & DIO_METADATA))
152 return;
153
154 gfs2_meta_inval(gl);
155 gl->gl_vn++;
156}
157
158/**
David Teiglandb3b94fa2006-01-16 16:50:04 +0000159 * inode_go_xmote_th - promote/demote a glock
160 * @gl: the glock
161 * @state: the requested state
162 * @flags:
163 *
164 */
165
166static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
167 int flags)
168{
169 if (gl->gl_state != LM_ST_UNLOCKED)
170 gfs2_pte_inval(gl);
171 gfs2_glock_xmote_th(gl, state, flags);
172}
173
174/**
175 * inode_go_xmote_bh - After promoting/demoting a glock
176 * @gl: the glock
177 *
178 */
179
180static void inode_go_xmote_bh(struct gfs2_glock *gl)
181{
182 struct gfs2_holder *gh = gl->gl_req_gh;
183 struct buffer_head *bh;
184 int error;
185
186 if (gl->gl_state != LM_ST_UNLOCKED &&
187 (!gh || !(gh->gh_flags & GL_SKIP))) {
188 error = gfs2_meta_read(gl, gl->gl_name.ln_number, DIO_START,
189 &bh);
190 if (!error)
191 brelse(bh);
192 }
193}
194
/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
 * is being purged from our node's glock cache; we're dropping lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	/* Tear down user PTEs first so nothing can fault pages in
	   under a lock we are about to give up; order matters. */
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}
209
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 * @flags: DIO_METADATA and/or DIO_DATA; DIO_RELEASE additionally
 *         empties the glock's AIL
 *
 * Nothing happens unless GLF_DIRTY is set.  The statement order in
 * the meta+data path (start data writeback, flush log, sync
 * metadata, then wait on data) is deliberate — presumably so log
 * flush and metadata writeback overlap the in-flight data I/O;
 * do not reorder without confirming.
 */

static void inode_go_sync(struct gfs2_glock *gl, int flags)
{
	int meta = (flags & DIO_METADATA);
	int data = (flags & DIO_DATA);

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (meta && data) {
			gfs2_page_sync(gl, flags | DIO_START);
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
			gfs2_page_sync(gl, flags | DIO_WAIT);
			/* GLF_DIRTY is cleared only when BOTH data and
			   metadata have been synced; partial syncs below
			   intentionally leave it set. */
			clear_bit(GLF_DIRTY, &gl->gl_flags);
		} else if (meta) {
			gfs2_log_flush(gl->gl_sbd, gl);
			gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
		} else if (data)
			gfs2_page_sync(gl, flags | DIO_START | DIO_WAIT);
		if (flags & DIO_RELEASE)
			gfs2_ail_empty_gl(gl);
	}
}
238
239/**
240 * inode_go_inval - prepare a inode glock to be released
241 * @gl: the glock
242 * @flags:
243 *
244 */
245
246static void inode_go_inval(struct gfs2_glock *gl, int flags)
247{
248 int meta = (flags & DIO_METADATA);
249 int data = (flags & DIO_DATA);
250
251 if (meta) {
252 gfs2_meta_inval(gl);
253 gl->gl_vn++;
254 }
255 if (data)
256 gfs2_page_inval(gl);
257}
258
259/**
260 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
261 * @gl: the glock
262 *
263 * Returns: 1 if it's ok
264 */
265
266static int inode_go_demote_ok(struct gfs2_glock *gl)
267{
268 struct gfs2_sbd *sdp = gl->gl_sbd;
269 int demote = 0;
270
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500271 if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000272 demote = 1;
273 else if (!sdp->sd_args.ar_localcaching &&
274 time_after_eq(jiffies, gl->gl_stamp +
275 gfs2_tune_get(sdp, gt_demote_secs) * HZ))
276 demote = 1;
277
278 return demote;
279}
280
281/**
282 * inode_go_lock - operation done after an inode lock is locked by a process
283 * @gl: the glock
284 * @flags:
285 *
286 * Returns: errno
287 */
288
289static int inode_go_lock(struct gfs2_holder *gh)
290{
291 struct gfs2_glock *gl = gh->gh_gl;
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500292 struct gfs2_inode *ip = gl->gl_object;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000293 int error = 0;
294
295 if (!ip)
296 return 0;
297
298 if (ip->i_vn != gl->gl_vn) {
299 error = gfs2_inode_refresh(ip);
300 if (error)
301 return error;
302 gfs2_inode_attr_in(ip);
303 }
304
305 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
306 (gl->gl_state == LM_ST_EXCLUSIVE) &&
307 (gh->gh_flags & GL_LOCAL_EXCL))
308 error = gfs2_truncatei_resume(ip);
309
310 return error;
311}
312
313/**
314 * inode_go_unlock - operation done before an inode lock is unlocked by a
315 * process
316 * @gl: the glock
317 * @flags:
318 *
319 */
320
321static void inode_go_unlock(struct gfs2_holder *gh)
322{
323 struct gfs2_glock *gl = gh->gh_gl;
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500324 struct gfs2_inode *ip = gl->gl_object;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000325
Steven Whitehouse75d3b812006-09-04 11:41:31 -0400326 if (ip == NULL)
327 return;
328 if (test_bit(GLF_DIRTY, &gl->gl_flags))
329 gfs2_inode_attr_in(ip);
330 gfs2_meta_cache_flush(ip);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000331}
332
333/**
334 * inode_greedy -
335 * @gl: the glock
336 *
337 */
338
339static void inode_greedy(struct gfs2_glock *gl)
340{
341 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500342 struct gfs2_inode *ip = gl->gl_object;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000343 unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
344 unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
345 unsigned int new_time;
346
347 spin_lock(&ip->i_spin);
348
349 if (time_after(ip->i_last_pfault + quantum, jiffies)) {
350 new_time = ip->i_greedy + quantum;
351 if (new_time > max)
352 new_time = max;
353 } else {
354 new_time = ip->i_greedy - quantum;
355 if (!new_time || new_time > max)
356 new_time = 1;
357 }
358
359 ip->i_greedy = new_time;
360
361 spin_unlock(&ip->i_spin);
362
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400363 iput(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000364}
365
366/**
367 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
368 * @gl: the glock
369 *
370 * Returns: 1 if it's ok
371 */
372
373static int rgrp_go_demote_ok(struct gfs2_glock *gl)
374{
375 return !gl->gl_aspace->i_mapping->nrpages;
376}
377
378/**
379 * rgrp_go_lock - operation done after an rgrp lock is locked by
380 * a first holder on this node.
381 * @gl: the glock
382 * @flags:
383 *
384 * Returns: errno
385 */
386
387static int rgrp_go_lock(struct gfs2_holder *gh)
388{
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500389 return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000390}
391
392/**
393 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
394 * a last holder on this node.
395 * @gl: the glock
396 * @flags:
397 *
398 */
399
400static void rgrp_go_unlock(struct gfs2_holder *gh)
401{
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500402 gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000403}
404
405/**
406 * trans_go_xmote_th - promote/demote the transaction glock
407 * @gl: the glock
408 * @state: the requested state
409 * @flags:
410 *
411 */
412
413static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
414 int flags)
415{
416 struct gfs2_sbd *sdp = gl->gl_sbd;
417
418 if (gl->gl_state != LM_ST_UNLOCKED &&
419 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
420 gfs2_meta_syncfs(sdp);
421 gfs2_log_shutdown(sdp);
422 }
423
424 gfs2_glock_xmote_th(gl, state, flags);
425}
426
427/**
428 * trans_go_xmote_bh - After promoting/demoting the transaction glock
429 * @gl: the glock
430 *
431 */
432
433static void trans_go_xmote_bh(struct gfs2_glock *gl)
434{
435 struct gfs2_sbd *sdp = gl->gl_sbd;
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400436 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500437 struct gfs2_glock *j_gl = ip->i_gl;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000438 struct gfs2_log_header head;
439 int error;
440
441 if (gl->gl_state != LM_ST_UNLOCKED &&
442 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400443 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000444 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
445
446 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
447 if (error)
448 gfs2_consist(sdp);
449 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
450 gfs2_consist(sdp);
451
452 /* Initialize some head of the log stuff */
453 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
454 sdp->sd_log_sequence = head.lh_sequence + 1;
455 gfs2_log_pointers_init(sdp, head.lh_blkno);
456 }
457 }
458}
459
460/**
461 * trans_go_drop_th - unlock the transaction glock
462 * @gl: the glock
463 *
464 * We want to sync the device even with localcaching. Remember
465 * that localcaching journal replay only marks buffers dirty.
466 */
467
468static void trans_go_drop_th(struct gfs2_glock *gl)
469{
470 struct gfs2_sbd *sdp = gl->gl_sbd;
471
472 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
473 gfs2_meta_syncfs(sdp);
474 gfs2_log_shutdown(sdp);
475 }
476
477 gfs2_glock_drop_th(gl);
478}
479
480/**
481 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
482 * @gl: the glock
483 *
484 * Returns: 1 if it's ok
485 */
486
487static int quota_go_demote_ok(struct gfs2_glock *gl)
488{
489 return !atomic_read(&gl->gl_lvb_count);
490}
491
/* Ops for per-superblock metadata glocks: generic promote/demote only. */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_META,
};
497
/* Ops for inode glocks: the full set — PTE/page teardown around state
   changes, sync/invalidate of data and metadata, demote policy, and
   lock/unlock refresh hooks. */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE,
};
510
/* Ops for resource-group glocks: metadata-only sync/invalidate plus
   bitmap buffer management on lock/unlock. */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};
521
/* Ops for the transaction glock (a nondisk lock): journal shutdown on
   demote, journal-head verification on promote. */
const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};
528
/* Ops for iopen glocks: generic state handling only. */
const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_IOPEN,
};
534
/* Ops for flock glocks: generic state handling only. */
const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK,
};
540
/* Ops for miscellaneous nondisk glocks: generic state handling only. */
const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK,
};
546
/* Ops for quota glocks: generic state handling; demote blocked while
   LVB users remain. */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};
553
/* Ops for journal glocks: generic state handling only. */
const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL,
};
559