/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_rmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"

/*
 * File system operations
 */

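/*
 * Report filesystem geometry to userspace.  All fields are filled from
 * the in-core superblock; higher geometry structure versions additionally
 * report stripe geometry, feature flags and log sector information.
 */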
int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{

	memset(geo, 0, sizeof(*geo));

	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid)==sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
			     XFS_FSOP_GEOM_FLAGS_DIRV2 |
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
			(xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
			(xfs_sb_version_hascrc(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
			(xfs_sb_version_hasftype(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_FTYPE : 0) |
			(xfs_sb_version_hasfinobt(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_FINOBT : 0) |
			(xfs_sb_version_hassparseinodes(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
			(xfs_sb_version_hasrmapbt(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_RMAPBT : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dir_geo->blksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}

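/*
 * Get an uncached, zeroed buffer for a new AG header block, set up its
 * disk address and verifier ops, and hand it back ready to be initialised
 * and written synchronously by the caller.
 */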
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}

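/*
 * Grow the data section of the filesystem.  Headers and btree root blocks
 * for any new AGs beyond the current end of the filesystem are written
 * directly (and synchronously); the superblock changes and the extension
 * of the old last AG are then committed in a single transaction.
 */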
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	struct xfs_agfl		*agfl;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error, saved_error = 0;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return -EINVAL;
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error)
		return error;
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return -EINVAL;
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		__be32	*agfl_bno;

		/*
		 * AG freespace header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agf_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		agf = XFS_BUF_TO_AGF(bp);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize =
				nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
			agf->agf_roots[XFS_BTNUM_RMAPi] =
						cpu_to_be32(XFS_RMAP_BLOCK(mp));
			agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
			agf->agf_rmap_blocks = cpu_to_be32(1);
		}

		agf->agf_flfirst = cpu_to_be32(1);
		agf->agf_fllast = 0;
		agf->agf_flcount = 0;
		tmpsize = agsize - mp->m_ag_prealloc_blocks;
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			agf->agf_refcount_root = cpu_to_be32(
					xfs_refc_block(mp));
			agf->agf_refcount_level = cpu_to_be32(1);
			agf->agf_refcount_blocks = cpu_to_be32(1);
		}

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG freelist header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agfl_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		agfl = XFS_BUF_TO_AGFL(bp);
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
			agfl->agfl_seqno = cpu_to_be32(agno);
			uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
		}

		agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
			agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * AG inode header block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				XFS_FSS_TO_BB(mp, 1), 0,
				&xfs_agi_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		agi = XFS_BUF_TO_AGI(bp);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
		if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
			agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
			agi->agi_free_level = cpu_to_be32(1);
		}
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * BNO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);

		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * CNT btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_allocbt_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
						agno, 0);

		arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
		arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/* RMAP btree root block */
		if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
			struct xfs_rmap_rec	*rrec;
			struct xfs_btree_block	*block;

			bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_RMAP_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_rmapbt_buf_ops);
			if (!bp) {
				error = -ENOMEM;
				goto error0;
			}

			xfs_btree_init_block(mp, bp, XFS_RMAP_CRC_MAGIC, 0, 0,
						agno, XFS_BTREE_CRC_BLOCKS);
			block = XFS_BUF_TO_BLOCK(bp);

			/*
			 * Mark the AG header regions as static metadata.
			 * The BNO btree block is the first block after the
			 * headers, so its location defines the size of the
			 * region the static metadata consumes.
			 *
			 * Note: unlike mkfs, we never have to account for log
			 * space when growing the data regions.
			 */
			rrec = XFS_RMAP_REC_ADDR(block, 1);
			rrec->rm_startblock = 0;
			rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);

			/* account freespace btree root blocks */
			rrec = XFS_RMAP_REC_ADDR(block, 2);
			rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
			rrec->rm_blockcount = cpu_to_be32(2);
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);

			/* account inode btree root blocks */
			rrec = XFS_RMAP_REC_ADDR(block, 3);
			rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
			rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
							XFS_IBT_BLOCK(mp));
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);

			/* account for rmap btree root */
			rrec = XFS_RMAP_REC_ADDR(block, 4);
			rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
			rrec->rm_blockcount = cpu_to_be32(1);
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);

			/* account for refc btree root */
			if (xfs_sb_version_hasreflink(&mp->m_sb)) {
				rrec = XFS_RMAP_REC_ADDR(block, 5);
				rrec->rm_startblock = cpu_to_be32(
						xfs_refc_block(mp));
				rrec->rm_blockcount = cpu_to_be32(1);
				rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
				rrec->rm_offset = 0;
				be16_add_cpu(&block->bb_numrecs, 1);
			}

			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
			if (error)
				goto error0;
		}

		/*
		 * INO btree root block
		 */
		bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_inobt_buf_ops);
		if (!bp) {
			error = -ENOMEM;
			goto error0;
		}

		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
						agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
						agno, 0);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			goto error0;

		/*
		 * FINO btree root block
		 */
		if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
			bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_FIBT_BLOCK(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_inobt_buf_ops);
			if (!bp) {
				error = -ENOMEM;
				goto error0;
			}

			if (xfs_sb_version_hascrc(&mp->m_sb))
				xfs_btree_init_block(mp, bp, XFS_FIBT_CRC_MAGIC,
						     0, 0, agno,
						     XFS_BTREE_CRC_BLOCKS);
			else
				xfs_btree_init_block(mp, bp, XFS_FIBT_MAGIC, 0,
						     0, agno, 0);

			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
			if (error)
				goto error0;
		}

		/*
		 * refcount btree root block
		 */
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			bp = xfs_growfs_get_hdr_buf(mp,
				XFS_AGB_TO_DADDR(mp, agno, xfs_refc_block(mp)),
				BTOBB(mp->m_sb.sb_blocksize), 0,
				&xfs_refcountbt_buf_ops);
			if (!bp) {
				error = -ENOMEM;
				goto error0;
			}

			xfs_btree_init_block(mp, bp, XFS_REFC_CRC_MAGIC,
					     0, 0, agno,
					     XFS_BTREE_CRC_BLOCKS);

			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
			if (error)
				goto error0;
		}
	}
	xfs_trans_agblocks_delta(tp, nfree);
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		struct xfs_owner_info	oinfo;

		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

		/*
		 * Free the new space.
		 *
		 * XFS_RMAP_OWN_NULL is used here to tell the rmap code that
		 * this space does not yet have an owner record in the rmap
		 * btree.
		 */
		xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
		error = xfs_free_extent(tp,
				XFS_AGB_TO_FSB(mp, agno,
					be32_to_cpu(agf->agf_length) - new),
				new, &oinfo, XFS_AG_RESV_NONE);
		if (error)
			goto error0;
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = 0;
		/*
		 * New secondary superblocks need to be zeroed, not read from
		 * disk, as the contents of the new area we are growing into
		 * are completely unknown.
		 */
		if (agno < oagcount) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0, &bp,
				  &xfs_sb_buf_ops);
		} else {
			bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
				  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				  XFS_FSS_TO_BB(mp, 1), 0);
			if (bp) {
				bp->b_ops = &xfs_sb_buf_ops;
				xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
			} else
				error = -ENOMEM;
		}

		/*
		 * If we get an error reading or writing alternate superblocks,
		 * continue.  xfs_repair chooses the "best" superblock based
		 * on most matches; if we break early, we'll leave more
		 * superblocks un-updated than updated, and xfs_repair may
		 * pick them over the properly-updated primary.
		 */
		if (error) {
			xfs_warn(mp,
		"error %d reading secondary superblock for ag %d",
				error, agno);
			saved_error = error;
			continue;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error) {
			xfs_warn(mp,
	"write error %d updating secondary superblock for ag %d",
				error, agno);
			saved_error = error;
			continue;
		}
	}
	return saved_error ? saved_error : error;

 error0:
	xfs_trans_cancel(tp);
	return error;
}

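/*
 * Grow the log section of the filesystem.  Only the size checks are
 * implemented here; actually resizing or moving the log is not yet
 * supported, so requests that pass validation return -ENOSYS.
 */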
static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

/*
 * The protected versions of the growfs functions acquire and release locks
 * on the mount point.  They are exported through the ioctls
 * XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */

int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_data_private(mp, in);
	/*
	 * Increment the generation unconditionally, the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

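/*
 * The inode and free block counts are sampled locklessly from the per-cpu
 * counters, so the values reported here are approximate; only the realtime
 * extent count is read under the superblock lock.
 */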
int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;

	spin_lock(&mp->m_sb_lock);
	cnt->freertx = mp->m_sb.sb_frextents;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}

/*
 * Exported through the ioctls XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS.
 *
 * xfs_reserve_blocks is called to set m_resblks in the in-core mount
 * table.  The number of unused reserved blocks is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available.  Otherwise return
 * as many as possible to satisfy the request.  The actual number reserved
 * is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta;
	__int64_t		fdblks_delta = 0;
	__uint64_t		request;
	__int64_t		free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out if we are freeing or allocating blocks first, then we
	 * can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 */
	error = -ENOSPC;
	do {
		free = percpu_counter_sum(&mp->m_fdblocks) -
						mp->m_alloc_set_aside;
		if (!free)
			break;

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0)
			/* We can't satisfy the request, just get what we can */
			fdblks_delta = free;
		else
			fdblks_delta = delta;

		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  If we get an ENOSPC, it means
		 * things changed while we were calculating fdblks_delta and so
		 * we should try again to see if there is anything left to
		 * reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve pool.
		 */
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	} while (error == -ENOSPC);

	/*
	 * Update the reserve counters if blocks have been successfully
	 * allocated.
	 */
	if (!error && fdblks_delta) {
		mp->m_resblks += fdblks_delta;
		mp->m_resblks_avail += fdblks_delta;
	}

out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

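/*
 * Shut the filesystem down in response to a userspace "goingdown" request.
 * The default flag freezes the block device first so dirty data is flushed
 * before the shutdown; the LOGFLUSH and NOLOGFLUSH variants shut down
 * immediately, with or without a log flush.
 */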
int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
	xfs_mount_t	*mp,
	int		flags,
	char		*fname,
	int		lnnum)
{
	int		logerror;

	logerror = flags & SHUTDOWN_LOG_IO_ERROR;

	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_notice(mp,
	"%s(0x%x) called from line %d of file %s.  Return address = 0x%p",
			__func__, flags, lnnum, fname, __return_address);
	}
	/*
	 * No need to duplicate efforts.
	 */
	if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
		return;

	/*
	 * This sets the XFS_MOUNT_FS_SHUTDOWN flag, makes sure that we don't
	 * queue up anybody new on the log reservations, and wakes up
	 * everybody who's sleeping on log reservations to tell them
	 * the bad news.
	 */
	if (xfs_log_force_umount(mp, logerror))
		return;

	if (flags & SHUTDOWN_CORRUPT_INCORE) {
		xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
    "Corruption of in-memory data detected.  Shutting down filesystem");
		if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
			xfs_stack_trace();
	} else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		if (logerror) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
		"Log I/O Error Detected.  Shutting down filesystem");
		} else if (flags & SHUTDOWN_DEVICE_REQ) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"All device paths lost.  Shutting down filesystem");
		} else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
			xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
		"I/O Error Detected. Shutting down filesystem");
		}
	}
	if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
		xfs_alert(mp,
"Please umount the filesystem and rectify the problem(s)");
	}
}