/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/bio.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "eattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)

enum {
        Opt_lockproto,
        Opt_locktable,
        Opt_hostdata,
        Opt_spectator,
        Opt_ignore_local_fs,
        Opt_localflocks,
        Opt_localcaching,
        Opt_debug,
        Opt_nodebug,
        Opt_upgrade,
        Opt_acl,
        Opt_noacl,
        Opt_quota_off,
        Opt_quota_account,
        Opt_quota_on,
        Opt_quota,
        Opt_noquota,
        Opt_suiddir,
        Opt_nosuiddir,
        Opt_data_writeback,
        Opt_data_ordered,
        Opt_meta,
        Opt_discard,
        Opt_nodiscard,
        Opt_commit,
        Opt_err_withdraw,
        Opt_err_panic,
        Opt_error,
};

static const match_table_t tokens = {
        {Opt_lockproto, "lockproto=%s"},
        {Opt_locktable, "locktable=%s"},
        {Opt_hostdata, "hostdata=%s"},
        {Opt_spectator, "spectator"},
        {Opt_ignore_local_fs, "ignore_local_fs"},
        {Opt_localflocks, "localflocks"},
        {Opt_localcaching, "localcaching"},
        {Opt_debug, "debug"},
        {Opt_nodebug, "nodebug"},
        {Opt_upgrade, "upgrade"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_quota_off, "quota=off"},
        {Opt_quota_account, "quota=account"},
        {Opt_quota_on, "quota=on"},
        {Opt_quota, "quota"},
        {Opt_noquota, "noquota"},
        {Opt_suiddir, "suiddir"},
        {Opt_nosuiddir, "nosuiddir"},
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_ordered, "data=ordered"},
        {Opt_meta, "meta"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_commit, "commit=%d"},
        {Opt_err_withdraw, "errors=withdraw"},
        {Opt_err_panic, "errors=panic"},
        {Opt_error, NULL}
};

/**
 * gfs2_mount_args - Parse mount options
 * @sdp: the filesystem
 * @args: the mount argument structure to fill in
 * @options: the options string passed at mount time
 *
 * Returns: errno
 */

int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
{
        char *o;
        int token;
        substring_t tmp[MAX_OPT_ARGS];
        int rv;

        /* Split the options into tokens with the "," character and
           process them */

        while (1) {
                o = strsep(&options, ",");
                if (o == NULL)
                        break;
                if (*o == '\0')
                        continue;

                token = match_token(o, tokens, tmp);
                switch (token) {
                case Opt_lockproto:
                        match_strlcpy(args->ar_lockproto, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_locktable:
                        match_strlcpy(args->ar_locktable, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_hostdata:
                        match_strlcpy(args->ar_hostdata, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_spectator:
                        args->ar_spectator = 1;
                        break;
                case Opt_ignore_local_fs:
                        args->ar_ignore_local_fs = 1;
                        break;
                case Opt_localflocks:
                        args->ar_localflocks = 1;
                        break;
                case Opt_localcaching:
                        args->ar_localcaching = 1;
                        break;
                case Opt_debug:
                        if (args->ar_errors == GFS2_ERRORS_PANIC) {
                                fs_info(sdp, "-o debug and -o errors=panic "
                                        "are mutually exclusive.\n");
                                return -EINVAL;
                        }
                        args->ar_debug = 1;
                        break;
                case Opt_nodebug:
                        args->ar_debug = 0;
                        break;
                case Opt_upgrade:
                        args->ar_upgrade = 1;
                        break;
                case Opt_acl:
                        args->ar_posix_acl = 1;
                        break;
                case Opt_noacl:
                        args->ar_posix_acl = 0;
                        break;
                case Opt_quota_off:
                case Opt_noquota:
                        args->ar_quota = GFS2_QUOTA_OFF;
                        break;
                case Opt_quota_account:
                        args->ar_quota = GFS2_QUOTA_ACCOUNT;
                        break;
                case Opt_quota_on:
                case Opt_quota:
                        args->ar_quota = GFS2_QUOTA_ON;
                        break;
                case Opt_suiddir:
                        args->ar_suiddir = 1;
                        break;
                case Opt_nosuiddir:
                        args->ar_suiddir = 0;
                        break;
                case Opt_data_writeback:
                        args->ar_data = GFS2_DATA_WRITEBACK;
                        break;
                case Opt_data_ordered:
                        args->ar_data = GFS2_DATA_ORDERED;
                        break;
                case Opt_meta:
                        args->ar_meta = 1;
                        break;
                case Opt_discard:
                        args->ar_discard = 1;
                        break;
                case Opt_nodiscard:
                        args->ar_discard = 0;
                        break;
                case Opt_commit:
                        rv = match_int(&tmp[0], &args->ar_commit);
                        if (rv || args->ar_commit <= 0) {
                                fs_info(sdp, "commit mount option requires a positive numeric argument\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_err_withdraw:
                        args->ar_errors = GFS2_ERRORS_WITHDRAW;
                        break;
                case Opt_err_panic:
                        if (args->ar_debug) {
                                fs_info(sdp, "-o debug and -o errors=panic "
                                        "are mutually exclusive.\n");
                                return -EINVAL;
                        }
                        args->ar_errors = GFS2_ERRORS_PANIC;
                        break;
                case Opt_error:
                default:
                        fs_info(sdp, "invalid mount option: %s\n", o);
                        return -EINVAL;
                }
        }

        return 0;
}

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
        struct list_head list, *head;
        struct gfs2_jdesc *jd;
        struct gfs2_journal_extent *jext;

        spin_lock(&sdp->sd_jindex_spin);
        list_add(&list, &sdp->sd_jindex_list);
        list_del_init(&sdp->sd_jindex_list);
        sdp->sd_journals = 0;
        spin_unlock(&sdp->sd_jindex_spin);

        while (!list_empty(&list)) {
                jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
                head = &jd->extent_list;
                while (!list_empty(head)) {
                        jext = list_entry(head->next,
                                          struct gfs2_journal_extent,
                                          extent_list);
                        list_del(&jext->extent_list);
                        kfree(jext);
                }
                list_del(&jd->jd_list);
                iput(jd->jd_inode);
                kfree(jd);
        }
}

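/*
 * Find the journal descriptor for a given journal id. The jindex spinlock
 * must be held across the search, as gfs2_jdesc_find() below does.
 */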
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
        struct gfs2_jdesc *jd;
        int found = 0;

        list_for_each_entry(jd, head, jd_list) {
                if (jd->jd_jid == jid) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                jd = NULL;

        return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
        spin_unlock(&sdp->sd_jindex_spin);

        return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        int ar;
        int error;

        if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) ||
            (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) {
                gfs2_consist_inode(ip);
                return -EIO;
        }
        jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;

        error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar);
        if (!error && ar) {
                gfs2_consist_inode(ip);
                error = -EIO;
        }

        return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_holder t_gh;
        struct gfs2_log_header_host head;
        int error;

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
        if (error)
                return error;

        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

        error = gfs2_find_jhead(sdp->sd_jdesc, &head);
        if (error)
                goto fail;

        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                gfs2_consist(sdp);
                error = -EIO;
                goto fail;
        }

        /* Initialize some head of the log stuff */
        sdp->sd_log_sequence = head.lh_sequence + 1;
        gfs2_log_pointers_init(sdp, head.lh_blkno);

        error = gfs2_quota_init(sdp);
        if (error)
                goto fail;

        set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        gfs2_glock_dq_uninit(&t_gh);

        return 0;

fail:
        t_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&t_gh);

        return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
        const struct gfs2_statfs_change *str = buf;

        sc->sc_total = be64_to_cpu(str->sc_total);
        sc->sc_free = be64_to_cpu(str->sc_free);
        sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
        struct gfs2_statfs_change *str = buf;

        str->sc_total = cpu_to_be64(sc->sc_total);
        str->sc_free = cpu_to_be64(sc->sc_free);
        str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        if (sdp->sd_args.ar_spectator) {
                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
        } else {
                error = gfs2_meta_inode_buffer(l_ip, &l_bh);
                if (error)
                        goto out_m_bh;

                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                gfs2_statfs_change_in(l_sc, l_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);

                brelse(l_bh);
        }

out_m_bh:
        brelse(m_bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return 0;
}

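/*
 * statfs bookkeeping is split between a master statfs file, shared by all
 * nodes, and a per-node "local" statfs file holding deltas that have not yet
 * been folded back into the master copy. gfs2_statfs_change() records deltas
 * locally; gfs2_statfs_sync() and update_statfs() later merge them into the
 * master file under an exclusive glock.
 */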
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
                        s64 dinodes)
{
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *l_bh;
        int error;

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                return;

        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

        spin_lock(&sdp->sd_statfs_spin);
        l_sc->sc_total += total;
        l_sc->sc_free += free;
        l_sc->sc_dinodes += dinodes;
        gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
        spin_unlock(&sdp->sd_statfs_spin);

        brelse(l_bh);
}

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
                   struct buffer_head *l_bh)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);

        spin_lock(&sdp->sd_statfs_spin);
        m_sc->sc_total += l_sc->sc_total;
        m_sc->sc_free += l_sc->sc_free;
        m_sc->sc_dinodes += l_sc->sc_dinodes;
        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
        memset(l_bh->b_data + sizeof(struct gfs2_dinode),
               0, sizeof(struct gfs2_statfs_change));
        spin_unlock(&sdp->sd_statfs_spin);

        gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
}

int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
        struct buffer_head *m_bh, *l_bh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
                spin_unlock(&sdp->sd_statfs_spin);
                goto out_bh;
        }
        spin_unlock(&sdp->sd_statfs_spin);

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                goto out_bh;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out_bh2;

        update_statfs(sdp, m_bh, l_bh);

        gfs2_trans_end(sdp);

out_bh2:
        brelse(l_bh);
out_bh:
        brelse(m_bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return error;
}

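/*
 * Per-journal holder used by gfs2_lock_fs_check_clean(): one is allocated
 * for each journal so that its glock can be held shared while the
 * transaction lock is acquired.
 */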
struct lfcc {
        struct list_head list;
        struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @t_gh: the hold on the transaction lock
 *
 * Returns: errno
 */
static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
                                    struct gfs2_holder *t_gh)
{
        struct gfs2_inode *ip;
        struct gfs2_jdesc *jd;
        struct lfcc *lfcc;
        LIST_HEAD(list);
        struct gfs2_log_header_host lh;
        int error;

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
                if (!lfcc) {
                        error = -ENOMEM;
                        goto out;
                }
                ip = GFS2_I(jd->jd_inode);
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
                if (error) {
                        kfree(lfcc);
                        goto out;
                }
                list_add(&lfcc->list, &list);
        }

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
                                   GL_NOCACHE, t_gh);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                error = gfs2_jdesc_check(jd);
                if (error)
                        break;
                error = gfs2_find_jhead(jd, &lh);
                if (error)
                        break;
                if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                        error = -EBUSY;
                        break;
                }
        }

        if (error)
                gfs2_glock_dq_uninit(t_gh);

out:
        while (!list_empty(&list)) {
                lfcc = list_entry(list.next, struct lfcc, list);
                list_del(&lfcc->list);
                gfs2_glock_dq_uninit(&lfcc->gh);
                kfree(lfcc);
        }
        return error;
}

/**
 * gfs2_freeze_fs - freezes the file system
 * @sdp: the file system
 *
 * This function flushes data and metadata for all machines by
 * acquiring the transaction log exclusively. All journals are
 * ensured to be in a clean state as well.
 *
 * Returns: errno
 */

int gfs2_freeze_fs(struct gfs2_sbd *sdp)
{
        int error = 0;

        mutex_lock(&sdp->sd_freeze_lock);

        if (!sdp->sd_freeze_count++) {
                error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
                if (error)
                        sdp->sd_freeze_count--;
        }

        mutex_unlock(&sdp->sd_freeze_lock);

        return error;
}

/**
 * gfs2_unfreeze_fs - unfreezes the file system
 * @sdp: the file system
 *
 * This function allows the file system to proceed by unlocking
 * the exclusively held transaction lock. Other GFS2 nodes are
 * now free to acquire the lock shared and go on with their lives.
 *
 */

void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
{
        mutex_lock(&sdp->sd_freeze_lock);

        if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);

        mutex_unlock(&sdp->sd_freeze_lock);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @sync: synchronous write flag
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, int sync)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        struct buffer_head *bh;
        struct timespec atime;
        struct gfs2_dinode *di;
        int ret = 0;

        /* Check this is a "normal" inode, etc */
        if (!test_bit(GIF_USER, &ip->i_flags) ||
            (current->flags & PF_MEMALLOC))
                return 0;
        ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (ret)
                goto do_flush;
        ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
        if (ret)
                goto do_unlock;
        ret = gfs2_meta_inode_buffer(ip, &bh);
        if (ret == 0) {
                di = (struct gfs2_dinode *)bh->b_data;
                atime.tv_sec = be64_to_cpu(di->di_atime);
                atime.tv_nsec = be32_to_cpu(di->di_atime_nsec);
                if (timespec_compare(&inode->i_atime, &atime) > 0) {
                        gfs2_trans_add_bh(ip->i_gl, bh, 1);
                        gfs2_dinode_out(ip, bh->b_data);
                }
                brelse(bh);
        }
        gfs2_trans_end(sdp);
do_unlock:
        gfs2_glock_dq_uninit(&gh);
do_flush:
        if (sync != 0)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
        return ret;
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
        struct gfs2_holder t_gh;
        int error;

        flush_workqueue(gfs2_delete_workqueue);
        gfs2_quota_sync(sdp);
        gfs2_statfs_sync(sdp);

        error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
                                   &t_gh);
        if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return error;

        gfs2_meta_syncfs(sdp);
        gfs2_log_shutdown(sdp);

        clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        if (t_gh.gh_gl)
                gfs2_glock_dq_uninit(&t_gh);

        gfs2_quota_cleanup(sdp);

        return error;
}

static int gfs2_umount_recovery_wait(void *word)
{
        schedule();
        return 0;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error;
        struct gfs2_jdesc *jd;

        /* Unfreeze the filesystem, if we need to */

        mutex_lock(&sdp->sd_freeze_lock);
        if (sdp->sd_freeze_count)
                gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
        mutex_unlock(&sdp->sd_freeze_lock);

        /* No more recovery requests */
        set_bit(SDF_NORECOVERY, &sdp->sd_flags);
        smp_mb();

        /* Wait on outstanding recovery */
restart:
        spin_lock(&sdp->sd_jindex_spin);
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
                        continue;
                spin_unlock(&sdp->sd_jindex_spin);
                wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
                            gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
                goto restart;
        }
        spin_unlock(&sdp->sd_jindex_spin);

        kthread_stop(sdp->sd_quotad_process);
        kthread_stop(sdp->sd_logd_process);

        if (!(sb->s_flags & MS_RDONLY)) {
                error = gfs2_make_fs_ro(sdp);
                if (error)
                        gfs2_io_error(sdp);
        }
        /* At this point, we're through modifying the disk */

        /* Release stuff */

        iput(sdp->sd_jindex);
        iput(sdp->sd_inum_inode);
        iput(sdp->sd_statfs_inode);
        iput(sdp->sd_rindex);
        iput(sdp->sd_quota_inode);

        gfs2_glock_put(sdp->sd_rename_gl);
        gfs2_glock_put(sdp->sd_trans_gl);

        if (!sdp->sd_args.ar_spectator) {
                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
                gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
                gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
                iput(sdp->sd_ir_inode);
                iput(sdp->sd_sc_inode);
                iput(sdp->sd_qc_inode);
        }

        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
        gfs2_clear_rgrpd(sdp);
        gfs2_jindex_free(sdp);
        /* Take apart glock structures and buffer lists */
        gfs2_gl_hash_clear(sdp);
        /* Unmount the locking protocol */
        gfs2_lm_unmount(sdp);

        /* At this point, we're through participating in the lockspace */
        gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
        if (wait && sb->s_fs_info)
                gfs2_log_flush(sb->s_fs_info, NULL);
        return 0;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error;

        if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return -EINVAL;

        for (;;) {
                error = gfs2_freeze_fs(sdp);
                if (!error)
                        break;

                switch (error) {
                case -EBUSY:
                        fs_err(sdp, "waiting for recovery before freeze\n");
                        break;

                default:
                        fs_err(sdp, "error freezing FS: %d\n", error);
                        break;
                }

                fs_err(sdp, "retrying...\n");
                msleep(1000);
        }
        return 0;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
        gfs2_unfreeze_fs(sb->s_fs_info);
        return 0;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
                            struct gfs2_statfs_change_host *sc)
{
        gfs2_rgrp_verify(rgd);
        sc->sc_total += rgd->rd_data;
        sc->sc_free += rgd->rd_free;
        sc->sc_dinodes += rgd->rd_dinodes;
        return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_holder ri_gh;
        struct gfs2_rgrpd *rgd_next;
        struct gfs2_holder *gha, *gh;
        unsigned int slots = 64;
        unsigned int x;
        int done;
        int error = 0, err;

        memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
        gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;

        error = gfs2_rindex_hold(sdp, &ri_gh);
        if (error)
                goto out;

        rgd_next = gfs2_rgrpd_get_first(sdp);

        for (;;) {
                done = 1;

                for (x = 0; x < slots; x++) {
                        gh = gha + x;

                        if (gh->gh_gl && gfs2_glock_poll(gh)) {
                                err = gfs2_glock_wait(gh);
                                if (err) {
                                        gfs2_holder_uninit(gh);
                                        error = err;
                                } else {
                                        if (!error)
                                                error = statfs_slow_fill(
                                                        gh->gh_gl->gl_object, sc);
                                        gfs2_glock_dq_uninit(gh);
                                }
                        }

                        if (gh->gh_gl)
                                done = 0;
                        else if (rgd_next && !error) {
                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
                                                           LM_ST_SHARED,
                                                           GL_ASYNC,
                                                           gh);
                                rgd_next = gfs2_rgrpd_get_next(rgd_next);
                                done = 0;
                        }

                        if (signal_pending(current))
                                error = -ERESTARTSYS;
                }

                if (done)
                        break;

                yield();
        }

        gfs2_glock_dq_uninit(&ri_gh);

out:
        kfree(gha);
        return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure to fill in
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        spin_lock(&sdp->sd_statfs_spin);

        *sc = *m_sc;
        sc->sc_total += l_sc->sc_total;
        sc->sc_free += l_sc->sc_free;
        sc->sc_dinodes += l_sc->sc_dinodes;

        spin_unlock(&sdp->sd_statfs_spin);

        if (sc->sc_free < 0)
                sc->sc_free = 0;
        if (sc->sc_free > sc->sc_total)
                sc->sc_free = sc->sc_total;
        if (sc->sc_dinodes < 0)
                sc->sc_dinodes = 0;

        return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry to stat
 * @buf: The buffer
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_inode->i_sb;
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_statfs_change_host sc;
        int error;

        if (gfs2_tune_get(sdp, gt_statfs_slow))
                error = gfs2_statfs_slow(sdp, &sc);
        else
                error = gfs2_statfs_i(sdp, &sc);

        if (error)
                return error;

        buf->f_type = GFS2_MAGIC;
        buf->f_bsize = sdp->sd_sb.sb_bsize;
        buf->f_blocks = sc.sc_total;
        buf->f_bfree = sc.sc_free;
        buf->f_bavail = sc.sc_free;
        buf->f_files = sc.sc_dinodes + sc.sc_free;
        buf->f_ffree = sc.sc_free;
        buf->f_namelen = GFS2_FNAMESIZE;

        return 0;
}

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */

static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_args args = sdp->sd_args; /* Default to current settings */
        struct gfs2_tune *gt = &sdp->sd_tune;
        int error;

        spin_lock(&gt->gt_spin);
        args.ar_commit = gt->gt_log_flush_secs;
        spin_unlock(&gt->gt_spin);
        error = gfs2_mount_args(sdp, &args, data);
        if (error)
                return error;

        /* Not allowed to change locking details */
        if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
            strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
            strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
                return -EINVAL;

        /* Some flags must not be changed */
        if (args_neq(&args, &sdp->sd_args, spectator) ||
            args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
            args_neq(&args, &sdp->sd_args, localflocks) ||
            args_neq(&args, &sdp->sd_args, localcaching) ||
            args_neq(&args, &sdp->sd_args, meta))
                return -EINVAL;

        if (sdp->sd_args.ar_spectator)
                *flags |= MS_RDONLY;

        if ((sb->s_flags ^ *flags) & MS_RDONLY) {
                if (*flags & MS_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
                if (error)
                        return error;
        }

        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
                sb->s_flags |= MS_POSIXACL;
        else
                sb->s_flags &= ~MS_POSIXACL;
        spin_lock(&gt->gt_spin);
        gt->gt_log_flush_secs = args.ar_commit;
        spin_unlock(&gt->gt_spin);

        gfs2_online_uevent(sdp);
        return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static void gfs2_drop_inode(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
                if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
                        clear_nlink(inode);
        }
        generic_drop_inode(inode);
}

/**
 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
 * @inode: The VFS inode
 *
 */

static void gfs2_clear_inode(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        /* This tells us it's a "real" inode and not one which only
         * serves to contain an address space (see rgrp.c, meta_io.c)
         * which therefore doesn't have its own glocks.
         */
        if (test_bit(GIF_USER, &ip->i_flags)) {
                ip->i_gl->gl_object = NULL;
                gfs2_glock_put(ip->i_gl);
                ip->i_gl = NULL;
                if (ip->i_iopen_gh.gh_gl) {
                        ip->i_iopen_gh.gh_gl->gl_object = NULL;
                        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                }
        }
}

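/*
 * Return 1 if d1 is d2 or lies beneath d2 in the dentry tree. Used by
 * gfs2_show_options() to detect a mount of the meta filesystem.
 */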
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
        do {
                if (d1 == d2)
                        return 1;
                d1 = d1->d_parent;
        } while (!IS_ROOT(d1));
        return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @mnt: vfsmount
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
{
        struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
        struct gfs2_args *args = &sdp->sd_args;
        int lfsecs;

        if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
                seq_printf(s, ",meta");
        if (args->ar_lockproto[0])
                seq_printf(s, ",lockproto=%s", args->ar_lockproto);
        if (args->ar_locktable[0])
                seq_printf(s, ",locktable=%s", args->ar_locktable);
        if (args->ar_hostdata[0])
                seq_printf(s, ",hostdata=%s", args->ar_hostdata);
        if (args->ar_spectator)
                seq_printf(s, ",spectator");
        if (args->ar_ignore_local_fs)
                seq_printf(s, ",ignore_local_fs");
        if (args->ar_localflocks)
                seq_printf(s, ",localflocks");
        if (args->ar_localcaching)
                seq_printf(s, ",localcaching");
        if (args->ar_debug)
                seq_printf(s, ",debug");
        if (args->ar_upgrade)
                seq_printf(s, ",upgrade");
        if (args->ar_posix_acl)
                seq_printf(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
                char *state;
                switch (args->ar_quota) {
                case GFS2_QUOTA_OFF:
                        state = "off";
                        break;
                case GFS2_QUOTA_ACCOUNT:
                        state = "account";
                        break;
                case GFS2_QUOTA_ON:
                        state = "on";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",quota=%s", state);
        }
        if (args->ar_suiddir)
                seq_printf(s, ",suiddir");
        if (args->ar_data != GFS2_DATA_DEFAULT) {
                char *state;
                switch (args->ar_data) {
                case GFS2_DATA_WRITEBACK:
                        state = "writeback";
                        break;
                case GFS2_DATA_ORDERED:
                        state = "ordered";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",data=%s", state);
        }
        if (args->ar_discard)
                seq_printf(s, ",discard");
        lfsecs = sdp->sd_tune.gt_log_flush_secs;
        if (lfsecs != 60)
                seq_printf(s, ",commit=%d", lfsecs);
        if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
                const char *state;

                switch (args->ar_errors) {
                case GFS2_ERRORS_WITHDRAW:
                        state = "withdraw";
                        break;
                case GFS2_ERRORS_PANIC:
                        state = "panic";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",errors=%s", state);
        }
        return 0;
}

/*
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_delete_inode(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;

        if (!test_bit(GIF_USER, &ip->i_flags))
                goto out;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (unlikely(error)) {
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
        }

        gfs2_glock_dq_wait(&ip->i_iopen_gh);
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
        error = gfs2_glock_nq(&ip->i_iopen_gh);
        if (error)
                goto out_truncate;

        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (ip->i_eattr) {
                error = gfs2_ea_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (!gfs2_is_stuffed(ip)) {
                error = gfs2_file_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        error = gfs2_dinode_dealloc(ip);
        if (error)
                goto out_unlock;

out_truncate:
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
        /* Needs to be done before glock release & also in a transaction */
        truncate_inode_pages(&inode->i_data, 0);
        gfs2_trans_end(sdp);

out_unlock:
        if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
                gfs2_glock_dq(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
out:
        truncate_inode_pages(&inode->i_data, 0);
        clear_inode(inode);
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
        struct gfs2_inode *ip;

        ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
        if (ip) {
                ip->i_flags = 0;
                ip->i_gl = NULL;
        }
        return &ip->i_inode;
}

static void gfs2_destroy_inode(struct inode *inode)
{
        kmem_cache_free(gfs2_inode_cachep, inode);
}

const struct super_operations gfs2_super_ops = {
        .alloc_inode = gfs2_alloc_inode,
        .destroy_inode = gfs2_destroy_inode,
        .write_inode = gfs2_write_inode,
        .delete_inode = gfs2_delete_inode,
        .put_super = gfs2_put_super,
        .sync_fs = gfs2_sync_fs,
        .freeze_fs = gfs2_freeze,
        .unfreeze_fs = gfs2_unfreeze,
        .statfs = gfs2_statfs,
        .remount_fs = gfs2_remount_fs,
        .clear_inode = gfs2_clear_inode,
        .drop_inode = gfs2_drop_inode,
        .show_options = gfs2_show_options,
};