/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
        Opt_lockproto,
        Opt_locktable,
        Opt_hostdata,
        Opt_spectator,
        Opt_ignore_local_fs,
        Opt_localflocks,
        Opt_localcaching,
        Opt_debug,
        Opt_nodebug,
        Opt_upgrade,
        Opt_acl,
        Opt_noacl,
        Opt_quota_off,
        Opt_quota_account,
        Opt_quota_on,
        Opt_quota,
        Opt_noquota,
        Opt_suiddir,
        Opt_nosuiddir,
        Opt_data_writeback,
        Opt_data_ordered,
        Opt_meta,
        Opt_discard,
        Opt_nodiscard,
        Opt_commit,
        Opt_err_withdraw,
        Opt_err_panic,
        Opt_error,
};

static const match_table_t tokens = {
        {Opt_lockproto, "lockproto=%s"},
        {Opt_locktable, "locktable=%s"},
        {Opt_hostdata, "hostdata=%s"},
        {Opt_spectator, "spectator"},
        {Opt_ignore_local_fs, "ignore_local_fs"},
        {Opt_localflocks, "localflocks"},
        {Opt_localcaching, "localcaching"},
        {Opt_debug, "debug"},
        {Opt_nodebug, "nodebug"},
        {Opt_upgrade, "upgrade"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_quota_off, "quota=off"},
        {Opt_quota_account, "quota=account"},
        {Opt_quota_on, "quota=on"},
        {Opt_quota, "quota"},
        {Opt_noquota, "noquota"},
        {Opt_suiddir, "suiddir"},
        {Opt_nosuiddir, "nosuiddir"},
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_ordered, "data=ordered"},
        {Opt_meta, "meta"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_commit, "commit=%d"},
        {Opt_err_withdraw, "errors=withdraw"},
        {Opt_err_panic, "errors=panic"},
        {Opt_error, NULL}
};

/**
 * gfs2_mount_args - Parse mount options
 * @args: The structure into which the parsed options will be written
 * @options: The options to parse
 *
 * Returns: errno
 */

int gfs2_mount_args(struct gfs2_args *args, char *options)
{
        char *o;
        int token;
        substring_t tmp[MAX_OPT_ARGS];
        int rv;

        /* Split the options into tokens with the "," character and
           process them */

        while (1) {
                o = strsep(&options, ",");
                if (o == NULL)
                        break;
                if (*o == '\0')
                        continue;

                token = match_token(o, tokens, tmp);
                switch (token) {
                case Opt_lockproto:
                        match_strlcpy(args->ar_lockproto, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_locktable:
                        match_strlcpy(args->ar_locktable, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_hostdata:
                        match_strlcpy(args->ar_hostdata, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_spectator:
                        args->ar_spectator = 1;
                        break;
                case Opt_ignore_local_fs:
                        args->ar_ignore_local_fs = 1;
                        break;
                case Opt_localflocks:
                        args->ar_localflocks = 1;
                        break;
                case Opt_localcaching:
                        args->ar_localcaching = 1;
                        break;
                case Opt_debug:
                        if (args->ar_errors == GFS2_ERRORS_PANIC) {
                                printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
                                       "are mutually exclusive.\n");
                                return -EINVAL;
                        }
                        args->ar_debug = 1;
                        break;
                case Opt_nodebug:
                        args->ar_debug = 0;
                        break;
                case Opt_upgrade:
                        args->ar_upgrade = 1;
                        break;
                case Opt_acl:
                        args->ar_posix_acl = 1;
                        break;
                case Opt_noacl:
                        args->ar_posix_acl = 0;
                        break;
                case Opt_quota_off:
                case Opt_noquota:
                        args->ar_quota = GFS2_QUOTA_OFF;
                        break;
                case Opt_quota_account:
                        args->ar_quota = GFS2_QUOTA_ACCOUNT;
                        break;
                case Opt_quota_on:
                case Opt_quota:
                        args->ar_quota = GFS2_QUOTA_ON;
                        break;
                case Opt_suiddir:
                        args->ar_suiddir = 1;
                        break;
                case Opt_nosuiddir:
                        args->ar_suiddir = 0;
                        break;
                case Opt_data_writeback:
                        args->ar_data = GFS2_DATA_WRITEBACK;
                        break;
                case Opt_data_ordered:
                        args->ar_data = GFS2_DATA_ORDERED;
                        break;
                case Opt_meta:
                        args->ar_meta = 1;
                        break;
                case Opt_discard:
                        args->ar_discard = 1;
                        break;
                case Opt_nodiscard:
                        args->ar_discard = 0;
                        break;
                case Opt_commit:
                        rv = match_int(&tmp[0], &args->ar_commit);
                        if (rv || args->ar_commit <= 0) {
                                printk(KERN_WARNING "GFS2: commit mount option requires a positive numeric argument\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_err_withdraw:
                        args->ar_errors = GFS2_ERRORS_WITHDRAW;
                        break;
                case Opt_err_panic:
                        if (args->ar_debug) {
                                printk(KERN_WARNING "GFS2: -o debug and -o errors=panic "
                                       "are mutually exclusive.\n");
                                return -EINVAL;
                        }
                        args->ar_errors = GFS2_ERRORS_PANIC;
                        break;
                case Opt_error:
                default:
                        printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
                        return -EINVAL;
                }
        }

        return 0;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
        struct list_head list, *head;
        struct gfs2_jdesc *jd;
        struct gfs2_journal_extent *jext;

        spin_lock(&sdp->sd_jindex_spin);
        list_add(&list, &sdp->sd_jindex_list);
        list_del_init(&sdp->sd_jindex_list);
        sdp->sd_journals = 0;
        spin_unlock(&sdp->sd_jindex_spin);

        while (!list_empty(&list)) {
                jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
                head = &jd->extent_list;
                while (!list_empty(head)) {
                        jext = list_entry(head->next,
                                          struct gfs2_journal_extent,
                                          extent_list);
                        list_del(&jext->extent_list);
                        kfree(jext);
                }
                list_del(&jd->jd_list);
                iput(jd->jd_inode);
                kfree(jd);
        }
}

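/**
 * jdesc_find_i - Look up a journal descriptor by journal id
 * @head: The list of journal descriptors to search
 * @jid: The journal id to find
 *
 * The caller is expected to hold sd_jindex_spin while the list is walked
 * (see gfs2_jdesc_find() below).
 *
 * Returns: the matching gfs2_jdesc, or NULL if no journal has that id
 */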
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
        struct gfs2_jdesc *jd;
        int found = 0;

        list_for_each_entry(jd, head, jd_list) {
                if (jd->jd_jid == jid) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                jd = NULL;

        return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
        spin_unlock(&sdp->sd_jindex_spin);

        return jd;
}

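/**
 * gfs2_jdesc_check - Sanity-check a journal descriptor
 * @jd: The journal descriptor to check
 *
 * Verifies that the journal inode size lies in the supported range
 * (8MB to 1GB), is a multiple of the filesystem block size and that the
 * journal is fully allocated, then records its size in blocks.
 *
 * Returns: 0 on success, -EIO if the journal looks inconsistent
 */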
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        int ar;
        int error;

        if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
            (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

        error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
        if (!error && ar) {
                gfs2_consist_inode(ip);
                error = -EIO;
        }

        return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_holder t_gh;
        struct gfs2_log_header_host head;
        int error;

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
        if (error)
                return error;

        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

        error = gfs2_find_jhead(sdp->sd_jdesc, &head);
        if (error)
                goto fail;

        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                gfs2_consist(sdp);
                error = -EIO;
                goto fail;
        }

        /* Initialize the head of the log */
        sdp->sd_log_sequence = head.lh_sequence + 1;
        gfs2_log_pointers_init(sdp, head.lh_blkno);

        error = gfs2_quota_init(sdp);
        if (error)
                goto fail;

        set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        gfs2_glock_dq_uninit(&t_gh);

        return 0;

fail:
        t_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&t_gh);

        return error;
}

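/*
 * gfs2_statfs_change_in/out convert a statfs change block between its
 * on-disk (big-endian) format and the in-core gfs2_statfs_change_host
 * representation.
 */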
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
        const struct gfs2_statfs_change *str = buf;

        sc->sc_total = be64_to_cpu(str->sc_total);
        sc->sc_free = be64_to_cpu(str->sc_free);
        sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
        struct gfs2_statfs_change *str = buf;

        str->sc_total = cpu_to_be64(sc->sc_total);
        str->sc_free = cpu_to_be64(sc->sc_free);
        str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

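/**
 * gfs2_statfs_init - Initialise the in-core statfs data
 * @sdp: the filesystem
 *
 * Reads the master statfs change block (and, unless this is a spectator
 * mount, the local change block as well) into the in-core copies under
 * an exclusive glock on the master statfs inode.
 *
 * Returns: errno if the statfs glock cannot be acquired, otherwise 0
 */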
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        if (sdp->sd_args.ar_spectator) {
                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
        } else {
                error = gfs2_meta_inode_buffer(l_ip, &l_bh);
                if (error)
                        goto out_m_bh;

                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                gfs2_statfs_change_in(l_sc, l_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);

                brelse(l_bh);
        }

out_m_bh:
        brelse(m_bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return 0;
}

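/**
 * gfs2_statfs_change - Apply deltas to the local statfs change file
 * @sdp: the filesystem
 * @total: change in the total number of blocks
 * @free: change in the number of free blocks
 * @dinodes: change in the number of dinodes
 *
 * Updates this node's in-core local statfs data and writes the result
 * back to its statfs change inode within the current transaction.
 */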
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
                        s64 dinodes)
{
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *l_bh;
        int error;

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                return;

        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

        spin_lock(&sdp->sd_statfs_spin);
        l_sc->sc_total += total;
        l_sc->sc_free += free;
        l_sc->sc_dinodes += dinodes;
        gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
        spin_unlock(&sdp->sd_statfs_spin);

        brelse(l_bh);
}

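/**
 * update_statfs - Fold the local statfs changes into the master copy
 * @sdp: the filesystem
 * @m_bh: buffer holding the master statfs change block
 * @l_bh: buffer holding the local statfs change block
 *
 * The caller is expected to hold a transaction and the exclusive glock
 * on the master statfs inode (see gfs2_statfs_sync() below).
 */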
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
                   struct buffer_head *l_bh)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

        spin_lock(&sdp->sd_statfs_spin);
        m_sc->sc_total += l_sc->sc_total;
        m_sc->sc_free += l_sc->sc_free;
        m_sc->sc_dinodes += l_sc->sc_dinodes;
        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
        memset(l_bh->b_data + sizeof(struct gfs2_dinode),
               0, sizeof(struct gfs2_statfs_change));
        spin_unlock(&sdp->sd_statfs_spin);

        gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
}

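/**
 * gfs2_statfs_sync - Sync the local statfs changes back to the master file
 * @sb: the superblock
 * @type: unused
 *
 * Returns: errno
 */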
int gfs2_statfs_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
        struct buffer_head *m_bh, *l_bh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
                spin_unlock(&sdp->sd_statfs_spin);
                goto out_bh;
        }
        spin_unlock(&sdp->sd_statfs_spin);

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                goto out_bh;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out_bh2;

        update_statfs(sdp, m_bh, l_bh);

        gfs2_trans_end(sdp);

out_bh2:
        brelse(l_bh);
out_bh:
        brelse(m_bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return error;
}

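/*
 * A list entry pairing a journal with the glock holder taken on its
 * inode by gfs2_lock_fs_check_clean() below.
 */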
struct lfcc {
        struct list_head list;
        struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
                                    struct gfs2_holder *t_gh)
{
        struct gfs2_inode *ip;
        struct gfs2_jdesc *jd;
        struct lfcc *lfcc;
        LIST_HEAD(list);
        struct gfs2_log_header_host lh;
        int error;

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
                if (!lfcc) {
                        error = -ENOMEM;
                        goto out;
                }
                ip = GFS2_I(jd->jd_inode);
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
                if (error) {
                        kfree(lfcc);
                        goto out;
                }
                list_add(&lfcc->list, &list);
        }

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
                                   GL_NOCACHE, t_gh);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                error = gfs2_jdesc_check(jd);
                if (error)
                        break;
                error = gfs2_find_jhead(jd, &lh);
                if (error)
                        break;
                if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                        error = -EBUSY;
                        break;
                }
        }

        if (error)
                gfs2_glock_dq_uninit(t_gh);

out:
        while (!list_empty(&list)) {
                lfcc = list_entry(list.next, struct lfcc, list);
                list_del(&lfcc->list);
                gfs2_glock_dq_uninit(&lfcc->gh);
                kfree(lfcc);
        }
        return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and meta data for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
        int error = 0;

        mutex_lock(&sdp->sd_freeze_lock);

        if (!sdp->sd_freeze_count++) {
                error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
                if (error)
                        sdp->sd_freeze_count--;
        }

        mutex_unlock(&sdp->sd_freeze_lock);

        return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_freeze_lock);

        if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

        mutex_unlock(&sdp->sd_freeze_lock);
}


/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, int sync)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
        struct timespec atime;
        struct gfs2_dinode *di;
        int ret = 0;

        /* Check this is a "normal" inode, etc */
        if (!test_bit(GIF_USER, &ip->i_flags) ||
            (current->flags & PF_MEMALLOC))
                return 0;
        ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (ret)
                goto do_flush;
        ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (ret)
                goto do_unlock;
        ret = gfs2_meta_inode_buffer(ip, &bh);
        if (ret == 0) {
                di = (struct gfs2_dinode *)bh->b_data;
                atime.tv_sec = be64_to_cpu(di->di_atime);
                atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
                if (timespec_compare(&inode->i_atime, &atime) > 0) {
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_dinode_out(ip, bh->b_data);
                }
                brelse(bh);
        }
        gfs2_trans_end(sdp);
do_unlock:
        gfs2_glock_dq_uninit(&gh);
do_flush:
        if (sync != 0)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
        return ret;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
        struct gfs2_holder t_gh;
        int error;

        flush_workqueue(gfs2_delete_workqueue);
        gfs2_quota_sync(sdp->sd_vfs, 0);
        gfs2_statfs_sync(sdp->sd_vfs, 0);

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
                                   &t_gh);
        if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return error;

        gfs2_meta_syncfs(sdp);
        gfs2_log_shutdown(sdp);

        clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        if (t_gh.gh_gl)
                gfs2_glock_dq_uninit(&t_gh);

        gfs2_quota_cleanup(sdp);

        return error;
}

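/*
 * Bit-wait callback used while waiting for in-progress journal recovery
 * to finish during unmount (see gfs2_put_super() below).
 */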
static int gfs2_umount_recovery_wait(void *word)
{
        schedule();
        return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error;
        struct gfs2_jdesc *jd;

        /* Unfreeze the filesystem, if we need to */

        mutex_lock(&sdp->sd_freeze_lock);
        if (sdp->sd_freeze_count)
                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
        mutex_unlock(&sdp->sd_freeze_lock);

        /* No more recovery requests */
        set_bit(SDF_NORECOVERY, &sdp->sd_flags);
        smp_mb();

        /* Wait on outstanding recovery */
restart:
        spin_lock(&sdp->sd_jindex_spin);
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
                        continue;
                spin_unlock(&sdp->sd_jindex_spin);
                wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
                            gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
                goto restart;
        }
        spin_unlock(&sdp->sd_jindex_spin);

        kthread_stop(sdp->sd_quotad_process);
        kthread_stop(sdp->sd_logd_process);

        if (!(sb->s_flags & MS_RDONLY)) {
                error = gfs2_make_fs_ro(sdp);
                if (error)
                        gfs2_io_error(sdp);
        }
        /* At this point, we're through modifying the disk */

        /* Release stuff */

        iput(sdp->sd_jindex);
        iput(sdp->sd_statfs_inode);
        iput(sdp->sd_rindex);
        iput(sdp->sd_quota_inode);

        gfs2_glock_put(sdp->sd_rename_gl);
        gfs2_glock_put(sdp->sd_trans_gl);

        if (!sdp->sd_args.ar_spectator) {
                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
                gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
                iput(sdp->sd_sc_inode);
                iput(sdp->sd_qc_inode);
        }

        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
        gfs2_clear_rgrpd(sdp);
        gfs2_jindex_free(sdp);
        /* Take apart glock structures and buffer lists */
        gfs2_gl_hash_clear(sdp);
        /* Unmount the locking protocol */
        gfs2_lm_unmount(sdp);

        /* At this point, we're through participating in the lockspace */
        gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
        if (wait && sb->s_fs_info)
                gfs2_log_flush(sb->s_fs_info, NULL);
        return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error;

        if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return -EINVAL;

        for (;;) {
                error = gfs2_freeze_fs(sdp);
                if (!error)
                        break;

                switch (error) {
                case -EBUSY:
                        fs_err(sdp, "waiting for recovery before freeze\n");
                        break;

                default:
                        fs_err(sdp, "error freezing FS: %d\n", error);
                        break;
                }

                fs_err(sdp, "retrying...\n");
                msleep(1000);
        }
        return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
        gfs2_unfreeze_fs(sb->s_fs_info);
        return 0;
}

/**
 * statfs_slow_fill - fill in the sc structure for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
                            struct gfs2_statfs_change_host *sc)
{
        gfs2_rgrp_verify(rgd);
        sc->sc_total += rgd->rd_data;
        sc->sc_free += rgd->rd_free;
        sc->sc_dinodes += rgd->rd_dinodes;
        return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_holder ri_gh;
        struct gfs2_rgrpd *rgd_next;
        struct gfs2_holder *gha, *gh;
        unsigned int slots = 64;
        unsigned int x;
        int done;
        int error = 0, err;

        memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
        gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;

        error = gfs2_rindex_hold(sdp, &ri_gh);
        if (error)
                goto out;

        rgd_next = gfs2_rgrpd_get_first(sdp);

        for (;;) {
                done = 1;

                for (x = 0; x < slots; x++) {
                        gh = gha + x;

                        if (gh->gh_gl && gfs2_glock_poll(gh)) {
                                err = gfs2_glock_wait(gh);
                                if (err) {
                                        gfs2_holder_uninit(gh);
                                        error = err;
                                } else {
                                        if (!error)
                                                error = statfs_slow_fill(
                                                        gh->gh_gl->gl_object, sc);
                                        gfs2_glock_dq_uninit(gh);
                                }
                        }

                        if (gh->gh_gl)
                                done = 0;
                        else if (rgd_next && !error) {
                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
                                                           LM_ST_SHARED,
                                                           GL_ASYNC,
                                                           gh);
                                rgd_next = gfs2_rgrpd_get_next(rgd_next);
                                done = 0;
                        }

                        if (signal_pending(current))
                                error = -ERESTARTSYS;
                }

                if (done)
                        break;

                yield();
        }

        gfs2_glock_dq_uninit(&ri_gh);

out:
        kfree(gha);
        return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        spin_lock(&sdp->sd_statfs_spin);

        *sc = *m_sc;
        sc->sc_total += l_sc->sc_total;
        sc->sc_free += l_sc->sc_free;
        sc->sc_dinodes += l_sc->sc_dinodes;

        spin_unlock(&sdp->sd_statfs_spin);

        if (sc->sc_free < 0)
                sc->sc_free = 0;
        if (sc->sc_free > sc->sc_total)
                sc->sc_free = sc->sc_total;
        if (sc->sc_dinodes < 0)
                sc->sc_dinodes = 0;

        return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: A dentry on the filesystem being queried
 * @buf: The kstatfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_inode->i_sb;
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_statfs_change_host sc;
        int error;

        if (gfs2_tune_get(sdp, gt_statfs_slow))
                error = gfs2_statfs_slow(sdp, &sc);
        else
                error = gfs2_statfs_i(sdp, &sc);

        if (error)
                return error;

        buf->f_type = GFS2_MAGIC;
        buf->f_bsize = sdp->sd_sb.sb_bsize;
        buf->f_blocks = sc.sc_total;
        buf->f_bfree = sc.sc_free;
        buf->f_bavail = sc.sc_free;
        buf->f_files = sc.sc_dinodes + sc.sc_free;
        buf->f_ffree = sc.sc_free;
        buf->f_namelen = GFS2_FNAMESIZE;

        return 0;
}

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */

static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_args args = sdp->sd_args; /* Default to current settings */
        struct gfs2_tune *gt = &sdp->sd_tune;
        int error;

        spin_lock(&gt->gt_spin);
        args.ar_commit = gt->gt_log_flush_secs;
        spin_unlock(&gt->gt_spin);
        error = gfs2_mount_args(&args, data);
        if (error)
                return error;

        /* Not allowed to change locking details */
        if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
            strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
            strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
                return -EINVAL;

        /* Some flags must not be changed */
        if (args_neq(&args, &sdp->sd_args, spectator) ||
            args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
            args_neq(&args, &sdp->sd_args, localflocks) ||
            args_neq(&args, &sdp->sd_args, localcaching) ||
            args_neq(&args, &sdp->sd_args, meta))
                return -EINVAL;

        if (sdp->sd_args.ar_spectator)
                *flags |= MS_RDONLY;

        if ((sb->s_flags ^ *flags) & MS_RDONLY) {
                if (*flags & MS_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
                if (error)
                        return error;
        }

        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
                sb->s_flags |= MS_POSIXACL;
        else
                sb->s_flags &= ~MS_POSIXACL;
        spin_lock(&gt->gt_spin);
        gt->gt_log_flush_secs = args.ar_commit;
        spin_unlock(&gt->gt_spin);

        gfs2_online_uevent(sdp);
        return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static void gfs2_drop_inode(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
                if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
                        clear_nlink(inode);
        }
        generic_drop_inode(inode);
}

/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */

static void gfs2_clear_inode(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        /* This tells us it's a "real" inode and not one which only
         * serves to contain an address space (see rgrp.c, meta_io.c)
         * which therefore doesn't have its own glocks.
         */
        if (test_bit(GIF_USER, &ip->i_flags)) {
                ip->i_gl->gl_object = NULL;
                gfs2_glock_put(ip->i_gl);
                ip->i_gl = NULL;
                if (ip->i_iopen_gh.gh_gl) {
                        ip->i_iopen_gh.gh_gl->gl_object = NULL;
                        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                }
        }
}

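/*
 * is_ancestor - Return 1 if d2 is d1 itself or an ancestor of d1.
 * Used by gfs2_show_options() to decide whether the "meta" option applies.
 */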
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
        do {
                if (d1 == d2)
                        return 1;
                d1 = d1->d_parent;
        } while (!IS_ROOT(d1));
        return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
        struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
        struct gfs2_args *args = &sdp->sd_args;
        int lfsecs;

        if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
                seq_printf(s, ",meta");
        if (args->ar_lockproto[0])
                seq_printf(s, ",lockproto=%s", args->ar_lockproto);
        if (args->ar_locktable[0])
                seq_printf(s, ",locktable=%s", args->ar_locktable);
        if (args->ar_hostdata[0])
                seq_printf(s, ",hostdata=%s", args->ar_hostdata);
        if (args->ar_spectator)
                seq_printf(s, ",spectator");
        if (args->ar_ignore_local_fs)
                seq_printf(s, ",ignore_local_fs");
        if (args->ar_localflocks)
                seq_printf(s, ",localflocks");
        if (args->ar_localcaching)
                seq_printf(s, ",localcaching");
        if (args->ar_debug)
                seq_printf(s, ",debug");
        if (args->ar_upgrade)
                seq_printf(s, ",upgrade");
        if (args->ar_posix_acl)
                seq_printf(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
                char *state;
                switch (args->ar_quota) {
                case GFS2_QUOTA_OFF:
                        state = "off";
                        break;
                case GFS2_QUOTA_ACCOUNT:
                        state = "account";
                        break;
                case GFS2_QUOTA_ON:
                        state = "on";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",quota=%s", state);
        }
        if (args->ar_suiddir)
                seq_printf(s, ",suiddir");
        if (args->ar_data != GFS2_DATA_DEFAULT) {
                char *state;
                switch (args->ar_data) {
                case GFS2_DATA_WRITEBACK:
                        state = "writeback";
                        break;
                case GFS2_DATA_ORDERED:
                        state = "ordered";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",data=%s", state);
        }
        if (args->ar_discard)
                seq_printf(s, ",discard");
        lfsecs = sdp->sd_tune.gt_log_flush_secs;
        if (lfsecs != 60)
                seq_printf(s, ",commit=%d", lfsecs);
        if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
                const char *state;

                switch (args->ar_errors) {
                case GFS2_ERRORS_WITHDRAW:
                        state = "withdraw";
                        break;
                case GFS2_ERRORS_PANIC:
                        state = "panic";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",errors=%s", state);
        }
        return 0;
}

/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_delete_inode(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;

        if (!test_bit(GIF_USER, &ip->i_flags))
                goto out;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (unlikely(error)) {
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
        }

        error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
        if (error)
                goto out_truncate;

        gfs2_glock_dq_wait(&ip->i_iopen_gh);
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
        error = gfs2_glock_nq(&ip->i_iopen_gh);
        if (error)
                goto out_truncate;

        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (ip->i_eattr) {
                error = gfs2_ea_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (!gfs2_is_stuffed(ip)) {
                error = gfs2_file_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        error = gfs2_dinode_dealloc(ip);
        if (error)
                goto out_unlock;

out_truncate:
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
        /* Needs to be done before glock release & also in a transaction */
        truncate_inode_pages(&inode->i_data, 0);
        gfs2_trans_end(sdp);

out_unlock:
        if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
                gfs2_glock_dq(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
        truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);
}

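/*
 * Allocate and free the combined VFS/GFS2 inode from the
 * gfs2_inode_cachep slab cache, for the alloc_inode and destroy_inode
 * super operations.
 */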
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
        struct gfs2_inode *ip;

        ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
        if (ip) {
                ip->i_flags = 0;
                ip->i_gl = NULL;
        }
        return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
        kmem_cache_free(gfs2_inode_cachep, inode);
}

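/*
 * The set of super_operations used by GFS2 superblocks.
 */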
const struct super_operations gfs2_super_ops = {
        .alloc_inode            = gfs2_alloc_inode,
        .destroy_inode          = gfs2_destroy_inode,
        .write_inode            = gfs2_write_inode,
        .delete_inode           = gfs2_delete_inode,
        .put_super              = gfs2_put_super,
        .sync_fs                = gfs2_sync_fs,
        .freeze_fs              = gfs2_freeze,
        .unfreeze_fs            = gfs2_unfreeze,
        .statfs                 = gfs2_statfs,
        .remount_fs             = gfs2_remount_fs,
        .clear_inode            = gfs2_clear_inode,
        .drop_inode             = gfs2_drop_inode,
        .show_options           = gfs2_show_options,
};
