blob: 475985d14758cc12a59c366552038928cebed348 [file] [log] [blame]
David Teiglandb3b94fa2006-01-16 16:50:04 +00001/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
Bob Petersonfe6c9912008-01-28 11:13:02 -06003 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
David Teiglandb3b94fa2006-01-16 16:50:04 +00004 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
Steven Whitehousee9fc2aa2006-09-01 11:05:15 -04007 * of the GNU General Public License version 2.
David Teiglandb3b94fa2006-01-16 16:50:04 +00008 */
9
Joe Perchesd77d1b52014-03-06 12:10:45 -080010#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
David Teiglandb3b94fa2006-01-16 16:50:04 +000012#include <linux/slab.h>
13#include <linux/spinlock.h>
14#include <linux/completion.h>
15#include <linux/buffer_head.h>
Steven Whitehousef42faf42006-01-30 18:34:10 +000016#include <linux/fs.h>
Steven Whitehouse5c676f62006-02-27 17:23:27 -050017#include <linux/gfs2_ondisk.h>
Bob Peterson1f466a42008-03-10 18:17:47 -050018#include <linux/prefetch.h>
Steven Whitehousef15ab562009-02-09 09:25:01 +000019#include <linux/blkdev.h>
Bob Peterson7c9ca622011-08-31 09:53:19 +010020#include <linux/rbtree.h>
Steven Whitehouse9dbe9612012-10-31 10:37:10 +000021#include <linux/random.h>
David Teiglandb3b94fa2006-01-16 16:50:04 +000022
23#include "gfs2.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050024#include "incore.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000025#include "glock.h"
26#include "glops.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000027#include "lops.h"
28#include "meta_io.h"
29#include "quota.h"
30#include "rgrp.h"
31#include "super.h"
32#include "trans.h"
Steven Whitehouse5c676f62006-02-27 17:23:27 -050033#include "util.h"
Benjamin Marzinski172e0452007-03-23 14:51:56 -060034#include "log.h"
Steven Whitehousec8cdf472007-06-08 10:05:33 +010035#include "inode.h"
Steven Whitehouse63997772009-06-12 08:49:20 +010036#include "trace_gfs2.h"
David Teiglandb3b94fa2006-01-16 16:50:04 +000037
Steven Whitehouse2c1e52a2006-09-05 15:41:57 -040038#define BFITNOENT ((u32)~0)
Bob Peterson6760bdc2007-07-24 14:09:32 -050039#define NO_BLOCK ((u64)~0)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040040
Bob Peterson1f466a42008-03-10 18:17:47 -050041#if BITS_PER_LONG == 32
42#define LBITMASK (0x55555555UL)
43#define LBITSKIP55 (0x55555555UL)
44#define LBITSKIP00 (0x00000000UL)
45#else
46#define LBITMASK (0x5555555555555555UL)
47#define LBITSKIP55 (0x5555555555555555UL)
48#define LBITSKIP00 (0x0000000000000000UL)
49#endif
50
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040051/*
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -040054 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 *
56 * 0 = Free
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
59 * 3 = Used (metadata)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040060 */
61
/*
 * A contiguous run of blocks within a resource group, described by its
 * starting position and its length in blocks.
 */
struct gfs2_extent {
	struct gfs2_rbm rbm;	/* start of the extent */
	u32 len;		/* number of blocks in the extent */
};
66
/*
 * valid_change[new_state * 4 + cur_state] is nonzero when a bitmap
 * transition from cur_state to new_state is legal.  See the block-state
 * legend above: 0 = free, 1 = used data, 2 = unlinked inode,
 * 3 = used metadata.
 */
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};
74
Bob Peterson5ce13432013-11-06 10:55:52 -050075static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
76 const struct gfs2_inode *ip, bool nowrap,
77 const struct gfs2_alloc_parms *ap);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +010078
79
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040080/**
81 * gfs2_setbit - Set a bit in the bitmaps
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010082 * @rbm: The position of the bit to set
83 * @do_clone: Also set the clone bitmap, if it exists
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040084 * @new_state: the new state of the block
85 *
86 */
87
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010088static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
Bob Peterson06344b92012-04-26 12:44:35 -040089 unsigned char new_state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040090{
Steven Whitehouseb45e41d2008-02-06 10:11:15 +000091 unsigned char *byte1, *byte2, *end, cur_state;
Bob Petersone579ed42013-09-17 13:12:15 -040092 struct gfs2_bitmap *bi = rbm_bi(rbm);
93 unsigned int buflen = bi->bi_len;
Steven Whitehouse3e6339d2012-08-13 11:37:51 +010094 const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040095
Bob Petersone579ed42013-09-17 13:12:15 -040096 byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
97 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -040098
Steven Whitehouseb45e41d2008-02-06 10:11:15 +000099 BUG_ON(byte1 >= end);
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400100
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000101 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400102
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000103 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
Joe Perchesd77d1b52014-03-06 12:10:45 -0800104 pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
105 rbm->offset, cur_state, new_state);
106 pr_warn("rgrp=0x%llx bi_start=0x%x\n",
107 (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
108 pr_warn("bi_offset=0x%x bi_len=0x%x\n",
109 bi->bi_offset, bi->bi_len);
Bob Peterson95c8e172011-03-22 10:49:12 -0400110 dump_stack();
Steven Whitehouse3e6339d2012-08-13 11:37:51 +0100111 gfs2_consist_rgrpd(rbm->rgd);
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000112 return;
113 }
114 *byte1 ^= (cur_state ^ new_state) << bit;
115
Bob Petersone579ed42013-09-17 13:12:15 -0400116 if (do_clone && bi->bi_clone) {
117 byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
Steven Whitehouseb45e41d2008-02-06 10:11:15 +0000118 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
119 *byte2 ^= (cur_state ^ new_state) << bit;
120 }
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400121}
122
123/**
124 * gfs2_testbit - test a bit in the bitmaps
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100125 * @rbm: The bit to test
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400126 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100127 * Returns: The two bit block state of the requested bit
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400128 */
129
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100130static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400131{
Bob Petersone579ed42013-09-17 13:12:15 -0400132 struct gfs2_bitmap *bi = rbm_bi(rbm);
133 const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100134 const u8 *byte;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400135 unsigned int bit;
136
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100137 byte = buffer + (rbm->offset / GFS2_NBBY);
138 bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400139
Steven Whitehousec04a2ef2012-08-13 11:14:57 +0100140 return (*byte >> bit) & GFS2_BIT_MASK;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400141}
142
143/**
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000144 * gfs2_bit_search
145 * @ptr: Pointer to bitmap data
146 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
147 * @state: The state we are searching for
148 *
149 * We xor the bitmap data with a patter which is the bitwise opposite
150 * of what we are looking for, this gives rise to a pattern of ones
151 * wherever there is a match. Since we have two bits per entry, we
152 * take this pattern, shift it down by one place and then and it with
153 * the original. All the even bit positions (0,2,4, etc) then represent
154 * successful matches, so we mask with 0x55555..... to remove the unwanted
155 * odd bit positions.
156 *
157 * This allows searching of a whole u64 at once (32 blocks) with a
158 * single test (on 64 bit arches).
159 */
160
161static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
162{
163 u64 tmp;
164 static const u64 search[] = {
Hannes Eder075ac442009-02-21 02:11:42 +0100165 [0] = 0xffffffffffffffffULL,
166 [1] = 0xaaaaaaaaaaaaaaaaULL,
167 [2] = 0x5555555555555555ULL,
168 [3] = 0x0000000000000000ULL,
Steven Whitehouse223b2b82009-02-17 14:13:35 +0000169 };
170 tmp = le64_to_cpu(*ptr) ^ search[state];
171 tmp &= (tmp >> 1);
172 tmp &= mask;
173 return tmp;
174}
175
176/**
Bob Peterson8e2e0042012-07-19 08:12:40 -0400177 * rs_cmp - multi-block reservation range compare
178 * @blk: absolute file system block number of the new reservation
179 * @len: number of blocks in the new reservation
180 * @rs: existing reservation to compare against
181 *
182 * returns: 1 if the block range is beyond the reach of the reservation
183 * -1 if the block range is before the start of the reservation
184 * 0 if the block range overlaps with the reservation
185 */
186static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
187{
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100188 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400189
190 if (blk >= startblk + rs->rs_free)
191 return 1;
192 if (blk + len - 1 < startblk)
193 return -1;
194 return 0;
195}
196
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	/* Starting bit within the first u64 word (2 bits per block) */
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	/* goal >> 5: 32 blocks per u64 word */
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	/* Scan a whole 64-bit word (32 blocks) per iteration */
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;	/* step back to the word that produced the match */
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}
247
/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * NOTE(review): if @block < rd_data0 the u64 subtraction below wraps
 * and trips the WARN_ON_ONCE -EINVAL path — presumably callers only
 * pass blocks at or beyond rd_data0; verify against callers.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	/* Split the adjusted offset into a bitmap index and in-bitmap offset */
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}
283
284/**
Bob Peterson149ed7f2013-09-17 13:14:35 -0400285 * gfs2_rbm_incr - increment an rbm structure
286 * @rbm: The rbm with rgd already set correctly
287 *
288 * This function takes an existing rbm structure and increments it to the next
289 * viable block offset.
290 *
291 * Returns: If incrementing the offset would cause the rbm to go past the
292 * end of the rgrp, true is returned, otherwise false.
293 *
294 */
295
296static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
297{
298 if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
299 rbm->offset++;
300 return false;
301 }
302 if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
303 return true;
304
305 rbm->offset = 0;
306 rbm->bii++;
307 return false;
308}
309
310/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100311 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
312 * @rbm: Position to search (value/result)
313 * @n_unaligned: Number of unaligned blocks to check
314 * @len: Decremented for each block found (terminate on zero)
315 *
316 * Returns: true if a non-free block is encountered
317 */
318
319static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
320{
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100321 u32 n;
322 u8 res;
323
324 for (n = 0; n < n_unaligned; n++) {
325 res = gfs2_testbit(rbm);
326 if (res != GFS2_BLKST_FREE)
327 return true;
328 (*len)--;
329 if (*len == 0)
330 return true;
Bob Peterson149ed7f2013-09-17 13:14:35 -0400331 if (gfs2_rbm_incr(rbm))
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +0100332 return true;
333 }
334
335 return false;
336}
337
/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;	/* local copy; @rrbm is not modified */
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;			/* remember the original request */
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	/* Walk block-by-block up to the next byte boundary */
	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		/* Prefer the clone bitmap when one is attached */
		if (bi->bi_clone)
			start = bi->bi_clone;
		end = start + bi->bi_bh->b_size;
		start += bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		/* Bytes to scan: bounded by both @len and the bitmap end */
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		/* First byte that is not all-free (0x00 = 4 free blocks) */
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			/* Ran off the end of the rgrp: nothing left to scan */
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			/* Hit a non-free byte: finish it block-by-block */
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;	/* blocks consumed = free extent length */
}
403
404/**
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400405 * gfs2_bitcount - count the number of bits in a certain state
Bob Peterson886b1412012-04-11 13:03:52 -0400406 * @rgd: the resource group descriptor
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400407 * @buffer: the buffer that holds the bitmaps
408 * @buflen: the length (in bytes) of the buffer
409 * @state: the state of the block we're looking for
410 *
411 * Returns: The number of bits
412 */
413
Steven Whitehouse110acf32008-01-29 13:30:20 +0000414static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
415 unsigned int buflen, u8 state)
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400416{
Steven Whitehouse110acf32008-01-29 13:30:20 +0000417 const u8 *byte = buffer;
418 const u8 *end = buffer + buflen;
419 const u8 state1 = state << 2;
420 const u8 state2 = state << 4;
421 const u8 state3 = state << 6;
Steven Whitehousecd915492006-09-04 12:49:07 -0400422 u32 count = 0;
Steven Whitehouse88c8ab1f2006-05-18 13:52:39 -0400423
424 for (; byte < end; byte++) {
425 if (((*byte) & 0x03) == state)
426 count++;
427 if (((*byte) & 0x0C) == state1)
428 count++;
429 if (((*byte) & 0x30) == state2)
430 count++;
431 if (((*byte) & 0xC0) == state3)
432 count++;
433 }
434
435 return count;
436}
437
David Teiglandb3b94fa2006-01-16 16:50:04 +0000438/**
439 * gfs2_rgrp_verify - Verify that a resource group is consistent
David Teiglandb3b94fa2006-01-16 16:50:04 +0000440 * @rgd: the rgrp
441 *
442 */
443
444void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
445{
446 struct gfs2_sbd *sdp = rgd->rd_sbd;
447 struct gfs2_bitmap *bi = NULL;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100448 u32 length = rgd->rd_length;
Steven Whitehousecd915492006-09-04 12:49:07 -0400449 u32 count[4], tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000450 int buf, x;
451
Steven Whitehousecd915492006-09-04 12:49:07 -0400452 memset(count, 0, 4 * sizeof(u32));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000453
454 /* Count # blocks in each of 4 possible allocation states */
455 for (buf = 0; buf < length; buf++) {
456 bi = rgd->rd_bits + buf;
457 for (x = 0; x < 4; x++)
458 count[x] += gfs2_bitcount(rgd,
459 bi->bi_bh->b_data +
460 bi->bi_offset,
461 bi->bi_len, x);
462 }
463
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000464 if (count[0] != rgd->rd_free) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000465 if (gfs2_consist_rgrpd(rgd))
466 fs_err(sdp, "free data mismatch: %u != %u\n",
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000467 count[0], rgd->rd_free);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000468 return;
469 }
470
Steven Whitehouse73f74942008-11-04 10:32:57 +0000471 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500472 if (count[1] != tmp) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000473 if (gfs2_consist_rgrpd(rgd))
474 fs_err(sdp, "used data mismatch: %u != %u\n",
475 count[1], tmp);
476 return;
477 }
478
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500479 if (count[2] + count[3] != rgd->rd_dinodes) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000480 if (gfs2_consist_rgrpd(rgd))
481 fs_err(sdp, "used metadata mismatch: %u != %u\n",
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500482 count[2] + count[3], rgd->rd_dinodes);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000483 return;
484 }
485}
486
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100487static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000488{
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100489 u64 first = rgd->rd_data0;
490 u64 last = first + rgd->rd_data;
Steven Whitehouse16910422006-09-05 11:15:45 -0400491 return first <= block && block < last;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000492}
493
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Walks the rindex rbtree under sd_rindex_spin, descending left or right
 * until @blk falls within a resource group's address span.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			/* @blk lies within cur's span: this is our rgrp */
			spin_unlock(&sdp->sd_rindex_spin);
			/* Re-check the bounds when an exact match is required */
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
533
534/**
535 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
536 * @sdp: The GFS2 superblock
537 *
538 * Returns: The first rgrp in the filesystem
539 */
540
541struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
542{
Bob Peterson7c9ca622011-08-31 09:53:19 +0100543 const struct rb_node *n;
544 struct gfs2_rgrpd *rgd;
545
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100546 spin_lock(&sdp->sd_rindex_spin);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100547 n = rb_first(&sdp->sd_rindex_tree);
548 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100549 spin_unlock(&sdp->sd_rindex_spin);
Bob Peterson7c9ca622011-08-31 09:53:19 +0100550
551 return rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000552}
553
/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Wraps around to the first rgrp when @rgd is the last one in the tree.
 *
 * Returns: The next rgrp, or NULL when the walk wraps straight back to
 * @rgd itself (i.e. it is the only rgrp)
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);	/* wrap around */

	/* Wrapped back to where we started: no other rgrp exists */
	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
579
Abhi Das00a158b2014-09-18 21:40:28 -0500580void check_and_update_goal(struct gfs2_inode *ip)
581{
582 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
583 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
584 ip->i_goal = ip->i_no_addr;
585}
586
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100587void gfs2_free_clones(struct gfs2_rgrpd *rgd)
588{
589 int x;
590
591 for (x = 0; x < rgd->rd_length; x++) {
592 struct gfs2_bitmap *bi = rgd->rd_bits + x;
593 kfree(bi->bi_clone);
594 bi->bi_clone = NULL;
595 }
596}
597
Bob Peterson0a305e42012-06-06 11:17:59 +0100598/**
599 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
600 * @ip: the inode for this reservation
601 */
602int gfs2_rs_alloc(struct gfs2_inode *ip)
603{
Abhijith Dasf1213ca2012-12-19 10:48:01 -0500604 int error = 0;
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100605
Bob Peterson0a305e42012-06-06 11:17:59 +0100606 down_write(&ip->i_rw_mutex);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400607 if (ip->i_res)
Abhijith Dasf1213ca2012-12-19 10:48:01 -0500608 goto out;
609
610 ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
611 if (!ip->i_res) {
612 error = -ENOMEM;
613 goto out;
614 }
615
616 RB_CLEAR_NODE(&ip->i_res->rs_node);
617out:
Bob Peterson0a305e42012-06-06 11:17:59 +0100618 up_write(&ip->i_rw_mutex);
Wei Yongjun441362d2013-03-11 23:01:37 +0800619 return error;
Bob Peterson0a305e42012-06-06 11:17:59 +0100620}
621
/* Print a one-line description of reservation @rs into @seq (debug output):
   inode number, starting block, bitmap offset and free-block count. */
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}
629
Bob Peterson0a305e42012-06-06 11:17:59 +0100630/**
Bob Peterson8e2e0042012-07-19 08:12:40 -0400631 * __rs_deltree - remove a multi-block reservation from the rgd tree
632 * @rs: The reservation to remove
633 *
634 */
Bob Peterson20095212013-03-13 10:26:38 -0400635static void __rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400636{
637 struct gfs2_rgrpd *rgd;
638
639 if (!gfs2_rs_active(rs))
640 return;
641
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100642 rgd = rs->rs_rbm.rgd;
Steven Whitehouse9e733d32012-08-23 15:37:59 +0100643 trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100644 rb_erase(&rs->rs_node, &rgd->rd_rstree);
Michel Lespinasse24d634e2012-08-05 22:04:08 -0700645 RB_CLEAR_NODE(&rs->rs_node);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400646
647 if (rs->rs_free) {
Bob Petersone579ed42013-09-17 13:12:15 -0400648 struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
649
Bob Peterson20095212013-03-13 10:26:38 -0400650 /* return reserved blocks to the rgrp */
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100651 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
652 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
Bob Peterson5ea50502013-11-25 11:16:25 +0000653 /* The rgrp extent failure point is likely not to increase;
654 it will only do so if the freed blocks are somehow
655 contiguous with a span of free blocks that follows. Still,
656 it will force the number to be recalculated later. */
657 rgd->rd_extfail_pt += rs->rs_free;
Bob Peterson8e2e0042012-07-19 08:12:40 -0400658 rs->rs_free = 0;
Bob Petersone579ed42013-09-17 13:12:15 -0400659 clear_bit(GBF_FULL, &bi->bi_flags);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400660 }
Bob Peterson8e2e0042012-07-19 08:12:40 -0400661}
662
663/**
664 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
665 * @rs: The reservation to remove
666 *
667 */
Bob Peterson20095212013-03-13 10:26:38 -0400668void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
Bob Peterson8e2e0042012-07-19 08:12:40 -0400669{
670 struct gfs2_rgrpd *rgd;
671
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100672 rgd = rs->rs_rbm.rgd;
673 if (rgd) {
674 spin_lock(&rgd->rd_rsspin);
Bob Peterson20095212013-03-13 10:26:38 -0400675 __rs_deltree(rs);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +0100676 spin_unlock(&rgd->rd_rsspin);
677 }
Bob Peterson8e2e0042012-07-19 08:12:40 -0400678}
679
/**
 * gfs2_rs_delete - delete a multi-block reservation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 *
 * Frees ip->i_res (removing it from its rgrp tree first), but only when
 * @wcount is NULL or indicates no other writers remain (<= 1).
 */
void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
		gfs2_rs_deltree(ip->i_res);
		/* deltree must have returned all reserved blocks */
		BUG_ON(ip->i_res->rs_free);
		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
		ip->i_res = NULL;
	}
	up_write(&ip->i_rw_mutex);
}
697
Bob Peterson8e2e0042012-07-19 08:12:40 -0400698/**
699 * return_all_reservations - return all reserved blocks back to the rgrp.
700 * @rgd: the rgrp that needs its space back
701 *
702 * We previously reserved a bunch of blocks for allocation. Now we need to
703 * give them back. This leave the reservation structures in tact, but removes
704 * all of their corresponding "no-fly zones".
705 */
706static void return_all_reservations(struct gfs2_rgrpd *rgd)
707{
708 struct rb_node *n;
709 struct gfs2_blkreserv *rs;
710
711 spin_lock(&rgd->rd_rsspin);
712 while ((n = rb_first(&rgd->rd_rstree))) {
713 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
Bob Peterson20095212013-03-13 10:26:38 -0400714 __rs_deltree(rs);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400715 }
716 spin_unlock(&rgd->rd_rsspin);
717}
718
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100719void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000720{
Bob Peterson7c9ca622011-08-31 09:53:19 +0100721 struct rb_node *n;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000722 struct gfs2_rgrpd *rgd;
723 struct gfs2_glock *gl;
724
Bob Peterson7c9ca622011-08-31 09:53:19 +0100725 while ((n = rb_first(&sdp->sd_rindex_tree))) {
726 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000727 gl = rgd->rd_gl;
728
Bob Peterson7c9ca622011-08-31 09:53:19 +0100729 rb_erase(n, &sdp->sd_rindex_tree);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000730
731 if (gl) {
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100732 spin_lock(&gl->gl_spin);
Steven Whitehouse5c676f62006-02-27 17:23:27 -0500733 gl->gl_object = NULL;
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100734 spin_unlock(&gl->gl_spin);
Steven Whitehouse29687a22011-03-30 16:33:25 +0100735 gfs2_glock_add_to_lru(gl);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000736 gfs2_glock_put(gl);
737 }
738
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100739 gfs2_free_clones(rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000740 kfree(rgd->rd_bits);
Bob Peterson8e2e0042012-07-19 08:12:40 -0400741 return_all_reservations(rgd);
Bob Peterson6bdd9be2008-01-28 17:20:26 -0600742 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000743 }
744}
745
/* Dump the core rindex fields of @rgd to the kernel log; used when a
 * resource group fails its consistency checks (see compute_bitstructs). */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
754
David Teiglandb3b94fa2006-01-16 16:50:04 +0000755/**
756 * gfs2_compute_bitstructs - Compute the bitmap sizes
757 * @rgd: The resource group descriptor
758 *
759 * Calculates bitmap descriptors, one for each block that contains bitmap data
760 *
761 * Returns: errno
762 */
763
764static int compute_bitstructs(struct gfs2_rgrpd *rgd)
765{
766 struct gfs2_sbd *sdp = rgd->rd_sbd;
767 struct gfs2_bitmap *bi;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100768 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
Steven Whitehousecd915492006-09-04 12:49:07 -0400769 u32 bytes_left, bytes;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000770 int x;
771
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400772 if (!length)
773 return -EINVAL;
774
Steven Whitehousedd894be2006-07-27 14:29:00 -0400775 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000776 if (!rgd->rd_bits)
777 return -ENOMEM;
778
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100779 bytes_left = rgd->rd_bitbytes;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000780
781 for (x = 0; x < length; x++) {
782 bi = rgd->rd_bits + x;
783
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +0100784 bi->bi_flags = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000785 /* small rgrp; bitmap stored completely in header block */
786 if (length == 1) {
787 bytes = bytes_left;
788 bi->bi_offset = sizeof(struct gfs2_rgrp);
789 bi->bi_start = 0;
790 bi->bi_len = bytes;
Bob Peterson7e230f52013-09-11 13:44:02 -0500791 bi->bi_blocks = bytes * GFS2_NBBY;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000792 /* header block */
793 } else if (x == 0) {
794 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
795 bi->bi_offset = sizeof(struct gfs2_rgrp);
796 bi->bi_start = 0;
797 bi->bi_len = bytes;
Bob Peterson7e230f52013-09-11 13:44:02 -0500798 bi->bi_blocks = bytes * GFS2_NBBY;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000799 /* last block */
800 } else if (x + 1 == length) {
801 bytes = bytes_left;
802 bi->bi_offset = sizeof(struct gfs2_meta_header);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100803 bi->bi_start = rgd->rd_bitbytes - bytes_left;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000804 bi->bi_len = bytes;
Bob Peterson7e230f52013-09-11 13:44:02 -0500805 bi->bi_blocks = bytes * GFS2_NBBY;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000806 /* other blocks */
807 } else {
Steven Whitehouse568f4c92006-02-27 12:00:42 -0500808 bytes = sdp->sd_sb.sb_bsize -
809 sizeof(struct gfs2_meta_header);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000810 bi->bi_offset = sizeof(struct gfs2_meta_header);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100811 bi->bi_start = rgd->rd_bitbytes - bytes_left;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000812 bi->bi_len = bytes;
Bob Peterson7e230f52013-09-11 13:44:02 -0500813 bi->bi_blocks = bytes * GFS2_NBBY;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000814 }
815
816 bytes_left -= bytes;
817 }
818
819 if (bytes_left) {
820 gfs2_consist_rgrpd(rgd);
821 return -EIO;
822 }
823 bi = rgd->rd_bits + (length - 1);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100824 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000825 if (gfs2_consist_rgrpd(rgd)) {
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100826 gfs2_rindex_print(rgd);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000827 fs_err(sdp, "start=%u len=%u offset=%u\n",
828 bi->bi_start, bi->bi_len, bi->bi_offset);
829 }
830 return -EIO;
831 }
832
833 return 0;
834}
835
836/**
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500837 * gfs2_ri_total - Total up the file system space, according to the rindex.
Bob Peterson886b1412012-04-11 13:03:52 -0400838 * @sdp: the filesystem
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500839 *
840 */
841u64 gfs2_ri_total(struct gfs2_sbd *sdp)
842{
843 u64 total_data = 0;
844 struct inode *inode = sdp->sd_rindex;
845 struct gfs2_inode *ip = GFS2_I(inode);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500846 char buf[sizeof(struct gfs2_rindex)];
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500847 int error, rgrps;
848
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500849 for (rgrps = 0;; rgrps++) {
850 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
851
Bob Petersonbcd72782010-12-07 13:58:56 -0500852 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500853 break;
Andrew Price43066292012-04-16 16:40:55 +0100854 error = gfs2_internal_read(ip, buf, &pos,
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500855 sizeof(struct gfs2_rindex));
856 if (error != sizeof(struct gfs2_rindex))
857 break;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100858 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500859 }
Robert Peterson7ae8fa82007-05-09 09:37:57 -0500860 return total_data;
861}
862
Bob Peterson6aad1c32012-03-05 09:20:59 -0500863static int rgd_insert(struct gfs2_rgrpd *rgd)
Bob Peterson7c9ca622011-08-31 09:53:19 +0100864{
865 struct gfs2_sbd *sdp = rgd->rd_sbd;
866 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
867
868 /* Figure out where to put new node */
869 while (*newn) {
870 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
871 rd_node);
872
873 parent = *newn;
874 if (rgd->rd_addr < cur->rd_addr)
875 newn = &((*newn)->rb_left);
876 else if (rgd->rd_addr > cur->rd_addr)
877 newn = &((*newn)->rb_right);
878 else
Bob Peterson6aad1c32012-03-05 09:20:59 -0500879 return -EEXIST;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100880 }
881
882 rb_link_node(&rgd->rd_node, parent, newn);
883 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
Bob Peterson6aad1c32012-03-05 09:20:59 -0500884 sdp->sd_rgrps++;
885 return 0;
Bob Peterson7c9ca622011-08-31 09:53:19 +0100886}
887
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Reads the next (sd_rgrps'th) entry of the rindex file, allocates and
 * initializes an in-core rgrp descriptor for it, creates its glock, and
 * inserts it into the rindex rbtree.
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	/* Past the end of the rindex file: no more entries */
	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	/* A short read of 0 bytes is EOF; anything else short is an error */
	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	/* Decode the big-endian on-disk entry into the in-core descriptor */
	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	/* Address range (in bytes) that this rgrp's glock covers */
	rgd->rd_gl->gl_vm.start = rgd->rd_addr * bsize;
	rgd->rd_gl->gl_vm.end = rgd->rd_gl->gl_vm.start + (rgd->rd_length * bsize) - 1;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	/* -EEXIST from rgd_insert: a racing reader already added this rgrp */
	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}
956
957/**
Bob Peterson0e27c182014-10-29 08:02:28 -0500958 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
959 * @sdp: the GFS2 superblock
960 *
961 * The purpose of this function is to select a subset of the resource groups
962 * and mark them as PREFERRED. We do it in such a way that each node prefers
963 * to use a unique set of rgrps to minimize glock contention.
964 */
965static void set_rgrp_preferences(struct gfs2_sbd *sdp)
966{
967 struct gfs2_rgrpd *rgd, *first;
968 int i;
969
970 /* Skip an initial number of rgrps, based on this node's journal ID.
971 That should start each node out on its own set. */
972 rgd = gfs2_rgrpd_get_first(sdp);
973 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
974 rgd = gfs2_rgrpd_get_next(rgd);
975 first = rgd;
976
977 do {
978 rgd->rd_flags |= GFS2_RDF_PREFERRED;
979 for (i = 0; i < sdp->sd_journals; i++) {
980 rgd = gfs2_rgrpd_get_next(rgd);
Abhi Das959b6712015-05-05 11:26:04 -0500981 if (!rgd || rgd == first)
Bob Peterson0e27c182014-10-29 08:02:28 -0500982 break;
983 }
Abhi Das959b6712015-05-05 11:26:04 -0500984 } while (rgd && rgd != first);
Bob Peterson0e27c182014-10-29 08:02:28 -0500985}
986
987/**
Robert Peterson6c532672007-05-10 16:54:38 -0500988 * gfs2_ri_update - Pull in a new resource index from the disk
989 * @ip: pointer to the rindex inode
990 *
David Teiglandb3b94fa2006-01-16 16:50:04 +0000991 * Returns: 0 on successful update, error code otherwise
992 */
993
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100994static int gfs2_ri_update(struct gfs2_inode *ip)
David Teiglandb3b94fa2006-01-16 16:50:04 +0000995{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -0400996 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000997 int error;
998
Steven Whitehouse8339ee52011-08-31 16:38:29 +0100999 do {
Andrew Price43066292012-04-16 16:40:55 +01001000 error = read_rindex_entry(ip);
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001001 } while (error == 0);
1002
1003 if (error < 0)
1004 return error;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001005
Bob Peterson0e27c182014-10-29 08:02:28 -05001006 set_rgrp_preferences(sdp);
1007
Bob Petersoncf45b752008-01-31 10:31:39 -06001008 sdp->sd_rindex_uptodate = 1;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001009 return 0;
Robert Peterson6c532672007-05-10 16:54:38 -05001010}
David Teiglandb3b94fa2006-01-16 16:50:04 +00001011
Robert Peterson6c532672007-05-10 16:54:38 -05001012/**
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001013 * gfs2_rindex_update - Update the rindex if required
David Teiglandb3b94fa2006-01-16 16:50:04 +00001014 * @sdp: The GFS2 superblock
David Teiglandb3b94fa2006-01-16 16:50:04 +00001015 *
1016 * We grab a lock on the rindex inode to make sure that it doesn't
1017 * change whilst we are performing an operation. We keep this lock
1018 * for quite long periods of time compared to other locks. This
1019 * doesn't matter, since it is shared and it is very, very rarely
1020 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
1021 *
1022 * This makes sure that we're using the latest copy of the resource index
1023 * special file, which might have been updated if someone expanded the
1024 * filesystem (via gfs2_grow utility), which adds new resource groups.
1025 *
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001026 * Returns: 0 on succeess, error code otherwise
David Teiglandb3b94fa2006-01-16 16:50:04 +00001027 */
1028
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001029int gfs2_rindex_update(struct gfs2_sbd *sdp)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001030{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001031 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001032 struct gfs2_glock *gl = ip->i_gl;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001033 struct gfs2_holder ri_gh;
1034 int error = 0;
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001035 int unlock_required = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001036
1037 /* Read new copy from disk if we don't have the latest */
Bob Petersoncf45b752008-01-31 10:31:39 -06001038 if (!sdp->sd_rindex_uptodate) {
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001039 if (!gfs2_glock_is_locked_by_me(gl)) {
1040 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
1041 if (error)
Bob Peterson6aad1c32012-03-05 09:20:59 -05001042 return error;
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001043 unlock_required = 1;
1044 }
Steven Whitehouse8339ee52011-08-31 16:38:29 +01001045 if (!sdp->sd_rindex_uptodate)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001046 error = gfs2_ri_update(ip);
Steven Whitehousea365fbf2012-02-24 15:09:14 +00001047 if (unlock_required)
1048 gfs2_glock_dq_uninit(&ri_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00001049 }
1050
1051 return error;
1052}
1053
Bob Peterson42d52e32008-01-28 18:38:07 -06001054static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001055{
1056 const struct gfs2_rgrp *str = buf;
Bob Peterson42d52e32008-01-28 18:38:07 -06001057 u32 rg_flags;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001058
Bob Peterson42d52e32008-01-28 18:38:07 -06001059 rg_flags = be32_to_cpu(str->rg_flags);
Steven Whitehouse09010972009-05-20 10:48:47 +01001060 rg_flags &= ~GFS2_RDF_MASK;
Steven Whitehouse1ce97e52009-05-21 15:18:19 +01001061 rgd->rd_flags &= GFS2_RDF_MASK;
1062 rgd->rd_flags |= rg_flags;
Steven Whitehousecfc8b542008-11-04 10:25:13 +00001063 rgd->rd_free = be32_to_cpu(str->rg_free);
Steven Whitehouse73f74942008-11-04 10:32:57 +00001064 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
Steven Whitehoused8b71f72008-11-04 10:19:03 +00001065 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001066}
1067
Bob Peterson42d52e32008-01-28 18:38:07 -06001068static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001069{
1070 struct gfs2_rgrp *str = buf;
1071
Steven Whitehouse09010972009-05-20 10:48:47 +01001072 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
Steven Whitehousecfc8b542008-11-04 10:25:13 +00001073 str->rg_free = cpu_to_be32(rgd->rd_free);
Steven Whitehouse73f74942008-11-04 10:32:57 +00001074 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001075 str->__pad = cpu_to_be32(0);
Steven Whitehoused8b71f72008-11-04 10:19:03 +00001076 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001077 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
1078}
1079
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001080static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
1081{
1082 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1083 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
1084
1085 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
1086 rgl->rl_dinodes != str->rg_dinodes ||
1087 rgl->rl_igeneration != str->rg_igeneration)
1088 return 0;
1089 return 1;
1090}
1091
/* Refresh the lock value block from the on-disk rgrp header at @buf.
 * The rg_* fields are already big-endian, so they are copied verbatim. */
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}
1103
1104static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
1105{
1106 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
1107 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
1108 rgl->rl_unlinked = cpu_to_be32(unlinked);
1109}
1110
1111static u32 count_unlinked(struct gfs2_rgrpd *rgd)
1112{
1113 struct gfs2_bitmap *bi;
1114 const u32 length = rgd->rd_length;
1115 const u8 *buffer = NULL;
1116 u32 i, goal, count = 0;
1117
1118 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
1119 goal = 0;
1120 buffer = bi->bi_bh->b_data + bi->bi_offset;
1121 WARN_ON(!buffer_uptodate(bi->bi_bh));
1122 while (goal < bi->bi_len * GFS2_NBBY) {
1123 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
1124 GFS2_BLKST_UNLINKED);
1125 if (goal == BFITNOENT)
1126 break;
1127 count++;
1128 goal++;
1129 }
1130 }
1131
1132 return count;
1133}
1134
1135
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	/* Bitmaps already cached: nothing to do */
	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	/* Submit reads for all bitmap blocks first ... */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* ... then wait for and verify them, in reverse order */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		/* Block 0 is the rgrp header, the rest are bitmap blocks */
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	/* LVB not yet initialized (wrong magic): seed it from the header */
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		/* LVB in use: it must agree with the on-disk header */
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	/* Release any buffers we managed to read before the failure */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}
1212
/* Bring the in-core rgrp descriptor up to date from the lock value block,
 * falling back to a full bitmap read when the LVB is not initialized.
 * Returns: errno */
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	/* LVB has no valid magic: must read the real header/bitmaps */
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	/* Keep in-core-only flag bits; take the rest from the LVB */
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}
1235
1236int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1237{
1238 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1239 struct gfs2_sbd *sdp = rgd->rd_sbd;
1240
1241 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1242 return 0;
Bob Peterson8b127d02014-01-16 08:52:16 -05001243 return gfs2_rgrp_bh_get(rgd);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001244}
1245
David Teiglandb3b94fa2006-01-16 16:50:04 +00001246/**
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001247 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1248 * @rgd: The resource group
David Teiglandb3b94fa2006-01-16 16:50:04 +00001249 *
1250 */
1251
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001252void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001253{
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001254 int x, length = rgd->rd_length;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001255
David Teiglandb3b94fa2006-01-16 16:50:04 +00001256 for (x = 0; x < length; x++) {
1257 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001258 if (bi->bi_bh) {
1259 brelse(bi->bi_bh);
1260 bi->bi_bh = NULL;
1261 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001262 }
1263
David Teiglandb3b94fa2006-01-16 16:50:04 +00001264}
1265
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001266/**
1267 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1268 * @gh: The glock holder for the resource group
1269 *
1270 */
1271
1272void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1273{
1274 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1275 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1276 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1277
1278 if (rgd && demote_requested)
1279 gfs2_rgrp_brelse(rgd);
1280}
1281
/**
 * gfs2_rgrp_send_discards - issue discards for blocks freed in a bitmap
 * @sdp: the filesystem
 * @offset: the first block address covered by this bitmap
 * @bh: the original ("before") copy of the bitmap block, or NULL
 * @bi: the bitmap descriptor to scan
 * @minlen: minimum extent length (in blocks) worth discarding
 * @ptrimmed: if non-NULL, receives the number of blocks discarded
 *
 * Each bitmap byte encodes GFS2_NBBY blocks at two bits per block.  When
 * @bh is given, only blocks that changed to free relative to @bh are
 * discarded; otherwise all currently-free blocks are.  Adjacent free
 * blocks are merged into extents before calling sb_issue_discard().
 *
 * Returns: 0 on success, -EIO on discard failure (and discards are then
 * disabled for this filesystem)
 */
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
			    struct buffer_head *bh,
			    const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 blk;
	sector_t start = 0;
	sector_t nr_blks = 0;
	int rv;
	unsigned int x;
	u32 trimmed = 0;
	u8 diff;

	for (x = 0; x < bi->bi_len; x++) {
		const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
		clone += bi->bi_offset;
		clone += x;
		if (bh) {
			/* Select blocks free now that were not free before */
			const u8 *orig = bh->b_data + bi->bi_offset + x;
			diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
		} else {
			diff = ~(*clone | (*clone >> 1));
		}
		/* Keep only the low bit of each 2-bit block state */
		diff &= 0x55;
		if (diff == 0)
			continue;
		blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
		while(diff) {
			if (diff & 1) {
				if (nr_blks == 0)
					goto start_new_extent;
				if ((start + nr_blks) != blk) {
					/* Extent ended; flush it if long enough */
					if (nr_blks >= minlen) {
						rv = sb_issue_discard(sb,
							start, nr_blks,
							GFP_NOFS, 0);
						if (rv)
							goto fail;
						trimmed += nr_blks;
					}
					nr_blks = 0;
start_new_extent:
					start = blk;
				}
				nr_blks++;
			}
			diff >>= 2;
			blk++;
		}
	}
	/* Flush the final pending extent, if any */
	if (nr_blks >= minlen) {
		rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
		if (rv)
			goto fail;
		trimmed += nr_blks;
	}
	if (ptrimmed)
		*ptrimmed = trimmed;
	return 0;

fail:
	if (sdp->sd_args.ar_discard)
		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
	sdp->sd_args.ar_discard = 0;
	return -EIO;
}
1348
/**
 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
 * @filp: Any file on the filesystem
 * @argp: Pointer to the arguments (also used to pass result)
 *
 * Returns: 0 on success, otherwise error code
 */

int gfs2_fitrim(struct file *filp, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
	struct buffer_head *bh;
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd *rgd_end;
	struct gfs2_holder gh;
	struct fstrim_range r;
	int ret = 0;
	u64 amt;
	u64 trimmed = 0;
	u64 start, end, minlen;
	unsigned int x;
	unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&r, argp, sizeof(r)))
		return -EFAULT;

	ret = gfs2_rindex_update(sdp);
	if (ret)
		return ret;

	/* Convert the byte-based fstrim_range into filesystem blocks.
	 * NOTE(review): a nonzero r.minlen smaller than the block size
	 * shifts down to 0 here rather than rounding up — confirm this is
	 * the intended minlen semantics. */
	start = r.start >> bs_shift;
	end = start + (r.len >> bs_shift);
	minlen = max_t(u64, r.minlen,
		       q->limits.discard_granularity) >> bs_shift;

	if (end <= start || minlen > sdp->sd_max_rg_data)
		return -EINVAL;

	/* NOTE(review): assumes gfs2_blk2rgrpd() returns non-NULL for these
	 * addresses after a successful rindex update — verify for ranges
	 * entirely outside the filesystem. */
	rgd = gfs2_blk2rgrpd(sdp, start, 0);
	rgd_end = gfs2_blk2rgrpd(sdp, end, 0);

	if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
	    && (start > rgd_end->rd_data0 + rgd_end->rd_data))
		return -EINVAL; /* start is beyond the end of the fs */

	/* Walk the rgrps from rgd to rgd_end inclusive, trimming each */
	while (1) {

		ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret)
			goto out;

		if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
			/* Trim each bitmap in the rgrp */
			for (x = 0; x < rgd->rd_length; x++) {
				struct gfs2_bitmap *bi = rgd->rd_bits + x;
				ret = gfs2_rgrp_send_discards(sdp,
						rgd->rd_data0, NULL, bi, minlen,
						&amt);
				if (ret) {
					gfs2_glock_dq_uninit(&gh);
					goto out;
				}
				trimmed += amt;
			}

			/* Mark rgrp as having been trimmed */
			ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
			if (ret == 0) {
				bh = rgd->rd_bits[0].bi_bh;
				rgd->rd_flags |= GFS2_RGF_TRIMMED;
				gfs2_trans_add_meta(rgd->rd_gl, bh);
				gfs2_rgrp_out(rgd, bh->b_data);
				gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
				gfs2_trans_end(sdp);
			}
		}
		gfs2_glock_dq_uninit(&gh);

		if (rgd == rgd_end)
			break;

		rgd = gfs2_rgrpd_get_next(rgd);
	}

out:
	/* Report back how many bytes were actually trimmed */
	r.len = trimmed << bs_shift;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

	return ret;
}
1448
/**
 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
 * @ip: the inode structure
 *
 * Links the (currently inactive) reservation attached to @ip into the
 * target rgrp's reservation tree, keyed by starting block and length,
 * and adds the reserved block count to the rgrp's accounting.  All tree
 * and accounting updates happen under rd_rsspin.
 */
static void rs_insert(struct gfs2_inode *ip)
{
	struct rb_node **newn, *parent = NULL;
	int rc;
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
	/* Starting filesystem block of the reservation; the tree key */
	u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

	/* An already-linked reservation must never be inserted twice */
	BUG_ON(gfs2_rs_active(rs));

	spin_lock(&rgd->rd_rsspin);
	newn = &rgd->rd_rstree.rb_node;
	while (*newn) {
		struct gfs2_blkreserv *cur =
			rb_entry(*newn, struct gfs2_blkreserv, rs_node);

		parent = *newn;
		rc = rs_cmp(fsblock, rs->rs_free, cur);
		if (rc > 0)
			newn = &((*newn)->rb_right);
		else if (rc < 0)
			newn = &((*newn)->rb_left);
		else {
			/* rs_cmp() == 0 means the new reservation overlaps an
			 * existing one, which should be impossible; warn and
			 * bail rather than corrupt the tree. */
			spin_unlock(&rgd->rd_rsspin);
			WARN_ON(1);
			return;
		}
	}

	rb_link_node(&rs->rs_node, parent, newn);
	rb_insert_color(&rs->rs_node, &rgd->rd_rstree);

	/* Do our rgrp accounting for the reservation */
	rgd->rd_reserved += rs->rs_free; /* blocks reserved */
	spin_unlock(&rgd->rd_rsspin);
	trace_gfs2_rs(rs, TRACE_RS_INSERT);
}
1491
1492/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001493 * rg_mblk_search - find a group of multiple free blocks to form a reservation
Bob Peterson8e2e0042012-07-19 08:12:40 -04001494 * @rgd: the resource group descriptor
Bob Peterson8e2e0042012-07-19 08:12:40 -04001495 * @ip: pointer to the inode for which we're reserving blocks
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001496 * @ap: the allocation parameters
Bob Peterson8e2e0042012-07-19 08:12:40 -04001497 *
Bob Peterson8e2e0042012-07-19 08:12:40 -04001498 */
1499
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001500static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001501 const struct gfs2_alloc_parms *ap)
Bob Peterson8e2e0042012-07-19 08:12:40 -04001502{
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001503 struct gfs2_rbm rbm = { .rgd = rgd, };
1504 u64 goal;
1505 struct gfs2_blkreserv *rs = ip->i_res;
1506 u32 extlen;
1507 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1508 int ret;
Bob Petersonaf21ca82013-05-14 13:04:29 -04001509 struct inode *inode = &ip->i_inode;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001510
Bob Petersonaf21ca82013-05-14 13:04:29 -04001511 if (S_ISDIR(inode->i_mode))
1512 extlen = 1;
1513 else {
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001514 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
Bob Petersonaf21ca82013-05-14 13:04:29 -04001515 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1516 }
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001517 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001518 return;
1519
Bob Peterson8e2e0042012-07-19 08:12:40 -04001520 /* Find bitmap block that contains bits for goal block */
1521 if (rgrp_contains_block(rgd, ip->i_goal))
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001522 goal = ip->i_goal;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001523 else
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001524 goal = rgd->rd_last_alloc + rgd->rd_data0;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001525
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001526 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1527 return;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001528
Bob Peterson5ce13432013-11-06 10:55:52 -05001529 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001530 if (ret == 0) {
1531 rs->rs_rbm = rbm;
1532 rs->rs_free = extlen;
1533 rs->rs_inum = ip->i_no_addr;
1534 rs_insert(ip);
Bob Peterson13d2eb02012-12-20 13:23:04 -05001535 } else {
1536 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1537 rgd->rd_last_alloc = 0;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001538 }
Bob Petersonb3e47ca2011-11-21 11:47:08 -05001539}
1540
David Teiglandb3b94fa2006-01-16 16:50:04 +00001541/**
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001542 * gfs2_next_unreserved_block - Return next block that is not reserved
1543 * @rgd: The resource group
1544 * @block: The starting block
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001545 * @length: The required length
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001546 * @ip: Ignore any reservations for this inode
1547 *
1548 * If the block does not appear in any reservation, then return the
1549 * block number unchanged. If it does appear in the reservation, then
1550 * keep looking through the tree of reservations in order to find the
1551 * first block number which is not reserved.
1552 */
1553
1554static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001555 u32 length,
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001556 const struct gfs2_inode *ip)
1557{
1558 struct gfs2_blkreserv *rs;
1559 struct rb_node *n;
1560 int rc;
1561
1562 spin_lock(&rgd->rd_rsspin);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001563 n = rgd->rd_rstree.rb_node;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001564 while (n) {
1565 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001566 rc = rs_cmp(block, length, rs);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001567 if (rc < 0)
1568 n = n->rb_left;
1569 else if (rc > 0)
1570 n = n->rb_right;
1571 else
1572 break;
1573 }
1574
1575 if (n) {
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001576 while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) {
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001577 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001578 n = n->rb_right;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001579 if (n == NULL)
1580 break;
1581 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1582 }
1583 }
1584
1585 spin_unlock(&rgd->rd_rsspin);
1586 return block;
1587}
1588
/**
 * gfs2_reservation_check_and_update - Check for reservations during block alloc
 * @rbm: The current position in the resource group
 * @ip: The inode for which we are searching for blocks
 * @minext: The minimum extent length
 * @maxext: A pointer to the maximum extent structure
 *
 * This checks the current position in the rgrp to see whether there is
 * a reservation covering this block. If not then this function is a
 * no-op. If there is, then the position is moved to the end of the
 * contiguous reservation(s) so that we are pointing at the first
 * non-reserved block.
 *
 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
 */

static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
					     const struct gfs2_inode *ip,
					     u32 minext,
					     struct gfs2_extent *maxext)
{
	u64 block = gfs2_rbm_to_block(rbm);
	u32 extlen = 1;
	u64 nblock;
	int ret;

	/*
	 * If we have a minimum extent length, then skip over any extent
	 * which is less than the min extent length in size.
	 */
	if (minext) {
		extlen = gfs2_free_extlen(rbm, minext);
		/* Shorter than the best candidate seen so far: not worth
		 * checking against the reservation tree, just skip it. */
		if (extlen <= maxext->len)
			goto fail;
	}

	/*
	 * Check the extent which has been found against the reservations
	 * and skip if parts of it are already reserved
	 */
	nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
	if (nblock == block) {
		/* No reservation in the way: success if no minimum was
		 * requested, or if the free extent is long enough. */
		if (!minext || extlen >= minext)
			return 0;

		/* Extent is free but too short; remember the longest one
		 * seen so the caller can fall back to it if nothing
		 * better is found. */
		if (extlen > maxext->len) {
			maxext->len = extlen;
			maxext->rbm = *rbm;
		}
fail:
		/* Resume the search just past this unusable extent */
		nblock = block + extlen;
	}
	ret = gfs2_rbm_from_block(rbm, nblock);
	if (ret < 0)
		return ret;
	return 1;
}
1646
/**
 * gfs2_rbm_find - Look for blocks of a particular state
 * @rbm: Value/result starting position and final position
 * @state: The state which we want to find
 * @minext: Pointer to the requested extent length (NULL for a single block)
 *          This is updated to be the actual reservation size.
 * @ip: If set, check for reservations
 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
 *          around until we've reached the starting point.
 * @ap: the allocation parameters
 *
 * Side effects:
 * - If looking for free blocks, we set GBF_FULL on each bitmap which
 *   has no free blocks in it.
 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
 *   has come up short on a free block search.
 *
 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
 */

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap,
			 const struct gfs2_alloc_parms *ap)
{
	struct buffer_head *bh;
	int initial_bii;
	u32 initial_offset;
	/* Remember where the search began so we can tell, at the end,
	 * whether we covered the whole rgrp (for rd_extfail_pt). */
	int first_bii = rbm->bii;
	u32 first_offset = rbm->offset;
	u32 offset;
	u8 *buffer;
	int n = 0;		/* number of bitmaps examined so far */
	int iters = rbm->rgd->rd_length;
	int ret;
	struct gfs2_bitmap *bi;
	/* Longest too-short free extent seen; possible fallback result */
	struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };

	/* If we are not starting at the beginning of a bitmap, then we
	 * need to add one to the bitmap count to ensure that we search
	 * the starting bitmap twice.
	 */
	if (rbm->offset != 0)
		iters++;

	while(1) {
		bi = rbm_bi(rbm);
		/* Skip bitmaps already known to contain no free blocks */
		if (test_bit(GBF_FULL, &bi->bi_flags) &&
		    (state == GFS2_BLKST_FREE))
			goto next_bitmap;

		bh = bi->bi_bh;
		buffer = bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bh));
		/* For allocation states, search the clone bitmap (which
		 * reflects in-flight deallocations) when one exists. */
		if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;
		initial_offset = rbm->offset;
		offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
		if (offset == BFITNOENT)
			goto bitmap_full;
		rbm->offset = offset;
		/* With no inode there are no reservations to respect */
		if (ip == NULL)
			return 0;

		initial_bii = rbm->bii;
		ret = gfs2_reservation_check_and_update(rbm, ip,
							minext ? *minext : 0,
							&maxext);
		if (ret == 0)
			return 0;
		if (ret > 0) {
			/* Position moved past a reservation; account for any
			 * bitmaps it skipped over. */
			n += (rbm->bii - initial_bii);
			goto next_iter;
		}
		if (ret == -E2BIG) {
			/* Reservation runs past the end of the rgrp: restart
			 * from the beginning (or stop, if nowrap). */
			rbm->bii = 0;
			rbm->offset = 0;
			n += (rbm->bii - initial_bii);
			goto res_covered_end_of_rgrp;
		}
		return ret;

bitmap_full:	/* Mark bitmap as full and fall through */
		if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
			set_bit(GBF_FULL, &bi->bi_flags);

next_bitmap:	/* Find next bitmap in the rgrp */
		rbm->offset = 0;
		rbm->bii++;
		if (rbm->bii == rbm->rgd->rd_length)
			rbm->bii = 0;
res_covered_end_of_rgrp:
		if ((rbm->bii == 0) && nowrap)
			break;
		n++;
next_iter:
		if (n >= iters)
			break;
	}

	if (minext == NULL || state != GFS2_BLKST_FREE)
		return -ENOSPC;

	/* If the extent was too small, and it's smaller than the smallest
	   to have failed before, remember for future reference that it's
	   useless to search this rgrp again for this amount or more. */
	if ((first_offset == 0) && (first_bii == 0) &&
	    (*minext < rbm->rgd->rd_extfail_pt))
		rbm->rgd->rd_extfail_pt = *minext;

	/* If the maximum extent we found is big enough to fulfill the
	   minimum requirements, use it anyway. */
	if (maxext.len) {
		*rbm = maxext.rbm;
		*minext = maxext.len;
		return 0;
	}

	return -ENOSPC;
}
1766
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: block address of the last dinode we unlinked
 * @skip: block address we should explicitly not unlink
 *
 * Scans @rgd's bitmaps for dinodes in the unlinked state and queues
 * their glocks on gfs2_delete_workqueue so the space can be reclaimed.
 * Clears GFS2_RDF_CHECK on the rgrp once the whole scan completes.
 */

static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
{
	u64 block;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;
	int found = 0;		/* dinodes queued for reclaim this scan */
	struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };

	while (1) {
		/* Hold off log flushes while searching the bitmaps so the
		 * unlinked state we observe is stable. */
		down_write(&sdp->sd_log_flush_lock);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
				      true, NULL);
		up_write(&sdp->sd_log_flush_lock);
		/* -ENOSPC: no more unlinked dinodes in this rgrp */
		if (error == -ENOSPC)
			break;
		if (WARN_ON_ONCE(error))
			break;

		block = gfs2_rbm_to_block(&rbm);
		/* Advance the search position past this block; stop if we
		 * have reached the end of the rgrp. */
		if (gfs2_rbm_from_block(&rbm, block + 1))
			break;
		/* Skip dinodes we already processed in an earlier pass */
		if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
			continue;
		if (block == skip)
			continue;
		*last_unlinked = block;

		error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
		if (error)
			continue;

		/* If the inode is already in cache, we can ignore it here
		 * because the existing inode disposal code will deal with
		 * it when all refs have gone away. Accessing gl_object like
		 * this is not safe in general. Here it is ok because we do
		 * not dereference the pointer, and we only need an approx
		 * answer to whether it is NULL or not.
		 */
		ip = gl->gl_object;

		/* Either the work now owns the glock ref, or we drop it */
		if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put(gl);
		else
			found++;

		/* Limit reclaim to sensible number of tasks */
		if (found > NR_CPUS)
			return;
	}

	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return;
}
1832
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001833/**
1834 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1835 * @rgd: The rgrp in question
1836 * @loops: An indication of how picky we can be (0=very, 1=less so)
1837 *
1838 * This function uses the recently added glock statistics in order to
1839 * figure out whether a parciular resource group is suffering from
1840 * contention from multiple nodes. This is done purely on the basis
1841 * of timings, since this is the only data we have to work with and
1842 * our aim here is to reject a resource group which is highly contended
1843 * but (very important) not to do this too often in order to ensure that
1844 * we do not land up introducing fragmentation by changing resource
1845 * groups when not actually required.
1846 *
1847 * The calculation is fairly simple, we want to know whether the SRTTB
1848 * (i.e. smoothed round trip time for blocking operations) to acquire
1849 * the lock for this rgrp's glock is significantly greater than the
1850 * time taken for resource groups on average. We introduce a margin in
1851 * the form of the variable @var which is computed as the sum of the two
1852 * respective variences, and multiplied by a factor depending on @loops
1853 * and whether we have a lot of data to base the decision on. This is
1854 * then tested against the square difference of the means in order to
1855 * decide whether the result is statistically significant or not.
1856 *
1857 * Returns: A boolean verdict on the congestion status
1858 */
1859
1860static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1861{
1862 const struct gfs2_glock *gl = rgd->rd_gl;
Bob Peterson15562c42015-03-16 11:52:05 -05001863 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001864 struct gfs2_lkstats *st;
Ben Hutchings4d207132015-08-27 12:51:45 -05001865 u64 r_dcount, l_dcount;
1866 u64 l_srttb, a_srttb = 0;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001867 s64 srttb_diff;
Ben Hutchings4d207132015-08-27 12:51:45 -05001868 u64 sqr_diff;
1869 u64 var;
Bob Peterson0166b192015-04-22 11:24:12 -05001870 int cpu, nonzero = 0;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001871
1872 preempt_disable();
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001873 for_each_present_cpu(cpu) {
1874 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
Bob Peterson0166b192015-04-22 11:24:12 -05001875 if (st->stats[GFS2_LKS_SRTTB]) {
1876 a_srttb += st->stats[GFS2_LKS_SRTTB];
1877 nonzero++;
1878 }
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001879 }
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001880 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
Bob Peterson0166b192015-04-22 11:24:12 -05001881 if (nonzero)
1882 do_div(a_srttb, nonzero);
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001883 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1884 var = st->stats[GFS2_LKS_SRTTVARB] +
1885 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1886 preempt_enable();
1887
1888 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1889 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1890
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001891 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001892 return false;
1893
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001894 srttb_diff = a_srttb - l_srttb;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001895 sqr_diff = srttb_diff * srttb_diff;
1896
1897 var *= 2;
1898 if (l_dcount < 8 || r_dcount < 8)
1899 var *= 2;
1900 if (loops == 1)
1901 var *= 2;
1902
1903 return ((srttb_diff < 0) && (sqr_diff > var));
1904}
1905
1906/**
1907 * gfs2_rgrp_used_recently
1908 * @rs: The block reservation with the rgrp to test
1909 * @msecs: The time limit in milliseconds
1910 *
1911 * Returns: True if the rgrp glock has been used within the time limit
1912 */
1913static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1914 u64 msecs)
1915{
1916 u64 tdiff;
1917
1918 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1919 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1920
1921 return tdiff > (msecs * 1000 * 1000);
1922}
1923
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001924static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1925{
1926 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1927 u32 skip;
1928
1929 get_random_bytes(&skip, sizeof(skip));
1930 return skip % sdp->sd_rgrps;
1931}
1932
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001933static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1934{
1935 struct gfs2_rgrpd *rgd = *pos;
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001936 struct gfs2_sbd *sdp = rgd->rd_sbd;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001937
1938 rgd = gfs2_rgrpd_get_next(rgd);
1939 if (rgd == NULL)
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001940 rgd = gfs2_rgrpd_get_first(sdp);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001941 *pos = rgd;
1942 if (rgd != begin) /* If we didn't wrap */
1943 return true;
1944 return false;
1945}
1946
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001947/**
Bob Peterson0e27c182014-10-29 08:02:28 -05001948 * fast_to_acquire - determine if a resource group will be fast to acquire
1949 *
1950 * If this is one of our preferred rgrps, it should be quicker to acquire,
1951 * because we tried to set ourselves up as dlm lock master.
1952 */
1953static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1954{
1955 struct gfs2_glock *gl = rgd->rd_gl;
1956
1957 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1958 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1959 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1960 return 1;
1961 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1962 return 1;
1963 return 0;
1964}
1965
1966/**
Bob Peterson666d1d82012-06-13 23:03:56 -04001967 * gfs2_inplace_reserve - Reserve space in the filesystem
David Teiglandb3b94fa2006-01-16 16:50:04 +00001968 * @ip: the inode to reserve space for
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001969 * @ap: the allocation parameters
David Teiglandb3b94fa2006-01-16 16:50:04 +00001970 *
Abhi Das25435e52015-03-18 12:04:37 -05001971 * We try our best to find an rgrp that has at least ap->target blocks
1972 * available. After a couple of passes (loops == 2), the prospects of finding
1973 * such an rgrp diminish. At this stage, we return the first rgrp that has
1974 * atleast ap->min_target blocks available. Either way, we set ap->allowed to
1975 * the number of blocks available in the chosen rgrp.
1976 *
1977 * Returns: 0 on success,
1978 * -ENOMEM if a suitable rgrp can't be found
1979 * errno otherwise
David Teiglandb3b94fa2006-01-16 16:50:04 +00001980 */
1981
Abhi Das25435e52015-03-18 12:04:37 -05001982int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001983{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001984 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001985 struct gfs2_rgrpd *begin = NULL;
Bob Peterson564e12b2011-11-21 13:36:17 -05001986 struct gfs2_blkreserv *rs = ip->i_res;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001987 int error = 0, rg_locked, flags = 0;
Bob Peterson666d1d82012-06-13 23:03:56 -04001988 u64 last_unlinked = NO_BLOCK;
Bob Peterson7c9ca622011-08-31 09:53:19 +01001989 int loops = 0;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001990 u32 skip = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001991
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001992 if (sdp->sd_args.ar_rgrplvb)
1993 flags |= GL_SKIP;
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001994 if (gfs2_assert_warn(sdp, ap->target))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001995 return -EINVAL;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001996 if (gfs2_rs_active(rs)) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001997 begin = rs->rs_rbm.rgd;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001998 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001999 rs->rs_rbm.rgd = begin = ip->i_rgd;
Bob Peterson8e2e0042012-07-19 08:12:40 -04002000 } else {
Abhi Das00a158b2014-09-18 21:40:28 -05002001 check_and_update_goal(ip);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002002 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002003 }
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01002004 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00002005 skip = gfs2_orlov_skip(ip);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002006 if (rs->rs_rbm.rgd == NULL)
Bob Peterson7c9ca622011-08-31 09:53:19 +01002007 return -EBADSLT;
2008
2009 while (loops < 3) {
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002010 rg_locked = 1;
Abhijith Das292c8c12007-11-29 14:13:54 -06002011
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002012 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
2013 rg_locked = 0;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00002014 if (skip && skip--)
2015 goto next_rgrp;
Bob Peterson0e27c182014-10-29 08:02:28 -05002016 if (!gfs2_rs_active(rs)) {
2017 if (loops == 0 &&
2018 !fast_to_acquire(rs->rs_rbm.rgd))
2019 goto next_rgrp;
2020 if ((loops < 2) &&
2021 gfs2_rgrp_used_recently(rs, 1000) &&
2022 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2023 goto next_rgrp;
2024 }
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002025 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
Bob Peterson8e2e0042012-07-19 08:12:40 -04002026 LM_ST_EXCLUSIVE, flags,
2027 &rs->rs_rgd_gh);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002028 if (unlikely(error))
2029 return error;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00002030 if (!gfs2_rs_active(rs) && (loops < 2) &&
2031 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2032 goto skip_rgrp;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002033 if (sdp->sd_args.ar_rgrplvb) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002034 error = update_rgrp_lvb(rs->rs_rbm.rgd);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002035 if (unlikely(error)) {
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002036 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2037 return error;
2038 }
2039 }
Abhijith Das292c8c12007-11-29 14:13:54 -06002040 }
Bob Peterson666d1d82012-06-13 23:03:56 -04002041
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002042 /* Skip unuseable resource groups */
Bob Peterson5ea50502013-11-25 11:16:25 +00002043 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2044 GFS2_RDF_ERROR)) ||
Abhi Das25435e52015-03-18 12:04:37 -05002045 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002046 goto skip_rgrp;
2047
2048 if (sdp->sd_args.ar_rgrplvb)
2049 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2050
2051 /* Get a reservation if we don't already have one */
2052 if (!gfs2_rs_active(rs))
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01002053 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002054
2055 /* Skip rgrps when we can't get a reservation on first pass */
2056 if (!gfs2_rs_active(rs) && (loops < 1))
2057 goto check_rgrp;
2058
2059 /* If rgrp has enough free space, use it */
Abhi Das25435e52015-03-18 12:04:37 -05002060 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2061 (loops == 2 && ap->min_target &&
2062 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002063 ip->i_rgd = rs->rs_rbm.rgd;
Abhi Das25435e52015-03-18 12:04:37 -05002064 ap->allowed = ip->i_rgd->rd_free_clone;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002065 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002066 }
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002067check_rgrp:
2068 /* Check for unlinked inodes which can be reclaimed */
2069 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2070 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2071 ip->i_no_addr);
2072skip_rgrp:
Bob Peterson1330edb2013-11-06 10:58:00 -05002073 /* Drop reservation, if we couldn't use reserved rgrp */
2074 if (gfs2_rs_active(rs))
2075 gfs2_rs_deltree(rs);
2076
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002077 /* Unlock rgrp if required */
2078 if (!rg_locked)
2079 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2080next_rgrp:
2081 /* Find the next rgrp, and continue looking */
2082 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2083 continue;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00002084 if (skip)
2085 continue;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002086
2087 /* If we've scanned all the rgrps, but found no free blocks
2088 * then this checks for some less likely conditions before
2089 * trying again.
2090 */
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002091 loops++;
2092 /* Check that fs hasn't grown if writing to rindex */
2093 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2094 error = gfs2_ri_update(ip);
2095 if (error)
2096 return error;
2097 }
2098 /* Flushing the log may release space */
2099 if (loops == 2)
Benjamin Marzinski24972552014-05-01 22:26:55 -05002100 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002101 }
2102
2103 return -ENOSPC;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002104}
2105
2106/**
2107 * gfs2_inplace_release - release an inplace reservation
2108 * @ip: the inode the reservation was taken out on
2109 *
2110 * Release a reservation made by gfs2_inplace_reserve().
2111 */
2112
2113void gfs2_inplace_release(struct gfs2_inode *ip)
2114{
Bob Peterson564e12b2011-11-21 13:36:17 -05002115 struct gfs2_blkreserv *rs = ip->i_res;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002116
Bob Peterson564e12b2011-11-21 13:36:17 -05002117 if (rs->rs_rgd_gh.gh_gl)
2118 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002119}
2120
2121/**
2122 * gfs2_get_block_type - Check a block in a RG is of given type
2123 * @rgd: the resource group holding the block
2124 * @block: the block number
2125 *
2126 * Returns: The block type (GFS2_BLKST_*)
2127 */
2128
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002129static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002130{
Steven Whitehouse39839032012-08-03 11:10:30 +01002131 struct gfs2_rbm rbm = { .rgd = rgd, };
2132 int ret;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002133
Steven Whitehouse39839032012-08-03 11:10:30 +01002134 ret = gfs2_rbm_from_block(&rbm, block);
2135 WARN_ON_ONCE(ret != 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002136
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002137 return gfs2_testbit(&rbm);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002138}
2139
David Teiglandb3b94fa2006-01-16 16:50:04 +00002140
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002141/**
2142 * gfs2_alloc_extent - allocate an extent from a given bitmap
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002143 * @rbm: the resource group information
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002144 * @dinode: TRUE if the first block we allocate is for a dinode
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002145 * @n: The extent length (value/result)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002146 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002147 * Add the bitmap buffer to the transaction.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002148 * Set the found bits to @new_state to change block's allocation state.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002149 */
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002150static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002151 unsigned int *n)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002152{
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002153 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002154 const unsigned int elen = *n;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002155 u64 block;
2156 int ret;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002157
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002158 *n = 1;
2159 block = gfs2_rbm_to_block(rbm);
Bob Petersone579ed42013-09-17 13:12:15 -04002160 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002161 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002162 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002163 while (*n < elen) {
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002164 ret = gfs2_rbm_from_block(&pos, block);
Bob Peterson0688a5e2012-08-28 08:45:56 -04002165 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002166 break;
Bob Petersone579ed42013-09-17 13:12:15 -04002167 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002168 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002169 (*n)++;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002170 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002171 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002172}
2173
2174/**
2175 * rgblk_free - Change alloc state of given block(s)
2176 * @sdp: the filesystem
2177 * @bstart: the start of a run of blocks to free
2178 * @blen: the length of the block run (all must lie within ONE RG!)
2179 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2180 *
2181 * Returns: Resource group containing the block(s)
2182 */
2183
Steven Whitehousecd915492006-09-04 12:49:07 -04002184static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2185 u32 blen, unsigned char new_state)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002186{
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002187 struct gfs2_rbm rbm;
Bob Petersond24e0562014-10-03 08:38:06 -04002188 struct gfs2_bitmap *bi, *bi_prev = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002189
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002190 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2191 if (!rbm.rgd) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00002192 if (gfs2_consist(sdp))
Steven Whitehouse382066d2006-05-24 10:22:09 -04002193 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002194 return NULL;
2195 }
2196
Bob Petersond24e0562014-10-03 08:38:06 -04002197 gfs2_rbm_from_block(&rbm, bstart);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002198 while (blen--) {
Bob Petersone579ed42013-09-17 13:12:15 -04002199 bi = rbm_bi(&rbm);
Bob Petersond24e0562014-10-03 08:38:06 -04002200 if (bi != bi_prev) {
2201 if (!bi->bi_clone) {
2202 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2203 GFP_NOFS | __GFP_NOFAIL);
2204 memcpy(bi->bi_clone + bi->bi_offset,
2205 bi->bi_bh->b_data + bi->bi_offset,
2206 bi->bi_len);
2207 }
2208 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2209 bi_prev = bi;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002210 }
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002211 gfs2_setbit(&rbm, false, new_state);
Bob Petersond24e0562014-10-03 08:38:06 -04002212 gfs2_rbm_incr(&rbm);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002213 }
2214
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002215 return rbm.rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002216}
2217
2218/**
Steven Whitehouse09010972009-05-20 10:48:47 +01002219 * gfs2_rgrp_dump - print out an rgrp
2220 * @seq: The iterator
2221 * @gl: The glock in question
David Teiglandb3b94fa2006-01-16 16:50:04 +00002222 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00002223 */
2224
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002225void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
Steven Whitehouse09010972009-05-20 10:48:47 +01002226{
Bob Peterson8e2e0042012-07-19 08:12:40 -04002227 struct gfs2_rgrpd *rgd = gl->gl_object;
2228 struct gfs2_blkreserv *trs;
2229 const struct rb_node *n;
2230
Steven Whitehouse09010972009-05-20 10:48:47 +01002231 if (rgd == NULL)
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002232 return;
Bob Peterson5ea50502013-11-25 11:16:25 +00002233 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
Steven Whitehouse09010972009-05-20 10:48:47 +01002234 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
Bob Peterson8e2e0042012-07-19 08:12:40 -04002235 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
Bob Peterson5ea50502013-11-25 11:16:25 +00002236 rgd->rd_reserved, rgd->rd_extfail_pt);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002237 spin_lock(&rgd->rd_rsspin);
2238 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2239 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2240 dump_rs(seq, trs);
2241 }
2242 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouse09010972009-05-20 10:48:47 +01002243}
2244
/**
 * gfs2_rgrp_error - mark a resource group as being in error
 * @rgd: the resource group with the inconsistency
 *
 * Logs a warning, dumps the rgrp state to the kernel log and sets
 * GFS2_RDF_ERROR, which makes the allocator skip this rgrp until umount.
 */
static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
		(unsigned long long)rgd->rd_addr);
	fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
	gfs2_rgrp_dump(NULL, rgd->rd_gl);
	rgd->rd_flags |= GFS2_RDF_ERROR;
}
2254
/**
 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
 * @ip: The inode we have just allocated blocks for
 * @rbm: The start of the allocated blocks
 * @len: The extent length
 *
 * Adjusts a reservation after an allocation has taken place. If the
 * reservation does not match the allocation, or if it is now empty
 * then it is removed.  All updates are made under rd_rsspin.
 */

static void gfs2_adjust_reservation(struct gfs2_inode *ip,
				    const struct gfs2_rbm *rbm, unsigned len)
{
	struct gfs2_blkreserv *rs = ip->i_res;
	struct gfs2_rgrpd *rgd = rbm->rgd;
	unsigned rlen;
	u64 block;
	int ret;

	spin_lock(&rgd->rd_rsspin);
	if (gfs2_rs_active(rs)) {
		if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
			/* Allocation starts exactly at the reservation:
			   consume up to @len reserved blocks and advance
			   the reservation past the allocated extent. */
			block = gfs2_rbm_to_block(rbm);
			ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
			rlen = min(rs->rs_free, len);
			rs->rs_free -= rlen;
			rgd->rd_reserved -= rlen;
			trace_gfs2_rs(rs, TRACE_RS_CLAIM);
			/* Keep the reservation if blocks remain and the
			   new start point is still inside the rgrp. */
			if (rs->rs_free && !ret)
				goto out;
			/* We used up our block reservation, so we should
			   reserve more blocks next time. */
			atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
		}
		/* Mismatched or exhausted reservation: drop it (lock held) */
		__rs_deltree(rs);
	}
out:
	spin_unlock(&rgd->rd_rsspin);
}
2295
2296/**
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002297 * gfs2_set_alloc_start - Set starting point for block allocation
2298 * @rbm: The rbm which will be set to the required location
2299 * @ip: The gfs2 inode
2300 * @dinode: Flag to say if allocation includes a new inode
2301 *
2302 * This sets the starting point from the reservation if one is active
2303 * otherwise it falls back to guessing a start point based on the
2304 * inode's goal block or the last allocation point in the rgrp.
2305 */
2306
2307static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2308 const struct gfs2_inode *ip, bool dinode)
2309{
2310 u64 goal;
2311
2312 if (gfs2_rs_active(ip->i_res)) {
2313 *rbm = ip->i_res->rs_rbm;
2314 return;
2315 }
2316
2317 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2318 goal = ip->i_goal;
2319 else
2320 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2321
2322 gfs2_rbm_from_block(rbm, goal);
2323}
2324
/**
 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
 * @ip: the inode to allocate the block for
 * @bn: Used to return the starting block number
 * @nblocks: requested number of blocks/extent length (value/result)
 * @dinode: 1 if we're allocating a dinode block, else 0
 * @generation: the generation number of the inode (dinode allocation only)
 *
 * Searches the inode's resource group for a free extent, marks it
 * allocated, updates the inode goal, reservation, rgrp counters,
 * statfs and quota, and adds the affected buffers to the transaction.
 *
 * Returns: 0 or error (-EIO after marking the rgrp in error)
 */

int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
		      bool dinode, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
	unsigned int ndata;
	u64 block; /* block, within the file system scope */
	int error;

	/* First pass honours the inode's reservation (ip passed in) */
	gfs2_set_alloc_start(&rbm, ip, dinode);
	error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL);

	/* Retry ignoring other inodes' reservations if the rgrp seemed full */
	if (error == -ENOSPC) {
		gfs2_set_alloc_start(&rbm, ip, dinode);
		error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false,
				      NULL);
	}

	/* Since all blocks are reserved in advance, this shouldn't happen */
	if (error) {
		fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
			(unsigned long long)ip->i_no_addr, error, *nblocks,
			test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
			rbm.rgd->rd_extfail_pt);
		goto rgrp_error;
	}

	/* Mark the extent used; *nblocks may shrink to what was free */
	gfs2_alloc_extent(&rbm, dinode, nblocks);
	block = gfs2_rbm_to_block(&rbm);
	rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
	if (gfs2_rs_active(ip->i_res))
		gfs2_adjust_reservation(ip, &rbm, *nblocks);
	/* ndata excludes the dinode block, if any */
	ndata = *nblocks;
	if (dinode)
		ndata--;

	if (!dinode) {
		/* Record the new goal block in the on-disk dinode */
		ip->i_goal = block + ndata - 1;
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error == 0) {
			struct gfs2_dinode *di =
				(struct gfs2_dinode *)dibh->b_data;
			gfs2_trans_add_meta(ip->i_gl, dibh);
			di->di_goal_meta = di->di_goal_data =
				cpu_to_be64(ip->i_goal);
			brelse(dibh);
		}
	}
	if (rbm.rgd->rd_free < *nblocks) {
		pr_warn("nblocks=%u\n", *nblocks);
		goto rgrp_error;
	}

	rbm.rgd->rd_free -= *nblocks;
	if (dinode) {
		rbm.rgd->rd_dinodes++;
		/* Generation 0 is reserved; skip it if the counter wraps */
		*generation = rbm.rgd->rd_igeneration++;
		if (*generation == 0)
			*generation = rbm.rgd->rd_igeneration++;
	}

	/* Write back the rgrp header and its lock value block copy */
	gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
	gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
	gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);

	gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
	if (dinode)
		gfs2_trans_add_unrevoke(sdp, block, *nblocks);

	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);

	rbm.rgd->rd_free_clone -= *nblocks;
	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
			       dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
	*bn = block;
	return 0;

rgrp_error:
	gfs2_rgrp_error(rbm.rgd);
	return -EIO;
}
2418
2419/**
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002420 * __gfs2_free_blocks - free a contiguous run of block(s)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002421 * @ip: the inode these blocks are being freed from
2422 * @bstart: first block of a run of contiguous blocks
2423 * @blen: the length of the block run
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002424 * @meta: 1 if the blocks represent metadata
David Teiglandb3b94fa2006-01-16 16:50:04 +00002425 *
2426 */
2427
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002428void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002429{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002430 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002431 struct gfs2_rgrpd *rgd;
2432
2433 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2434 if (!rgd)
2435 return;
Bob Peterson41db1ab2012-05-09 12:11:35 -04002436 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
Steven Whitehousecfc8b542008-11-04 10:25:13 +00002437 rgd->rd_free += blen;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002438 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002439 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002440 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002441 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002442
Steven Whitehouse6d3117b2011-05-21 14:05:58 +01002443 /* Directories keep their data in the metadata address space */
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002444 if (meta || ip->i_depth)
Steven Whitehouse6d3117b2011-05-21 14:05:58 +01002445 gfs2_meta_wipe(ip, bstart, blen);
Bob Peterson4c16c362011-02-23 16:11:33 -05002446}
David Teiglandb3b94fa2006-01-16 16:50:04 +00002447
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run
 *
 * Frees the blocks (wiping cached metadata) and credits them back to
 * statfs and the owner's quota.
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 1);
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
2464
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002465void gfs2_unlink_di(struct inode *inode)
2466{
2467 struct gfs2_inode *ip = GFS2_I(inode);
2468 struct gfs2_sbd *sdp = GFS2_SB(inode);
2469 struct gfs2_rgrpd *rgd;
Steven Whitehousedbb7cae2007-05-15 15:37:50 +01002470 u64 blkno = ip->i_no_addr;
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002471
2472 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2473 if (!rgd)
2474 return;
Bob Peterson41db1ab2012-05-09 12:11:35 -04002475 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002476 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002477 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002478 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2479 update_rgrp_lvb_unlinked(rgd, 1);
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002480}
2481
Steven Whitehousecd915492006-09-04 12:49:07 -04002482static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002483{
2484 struct gfs2_sbd *sdp = rgd->rd_sbd;
2485 struct gfs2_rgrpd *tmp_rgd;
2486
2487 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2488 if (!tmp_rgd)
2489 return;
2490 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2491
Steven Whitehouse73f74942008-11-04 10:32:57 +00002492 if (!rgd->rd_dinodes)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002493 gfs2_consist_rgrpd(rgd);
Steven Whitehouse73f74942008-11-04 10:32:57 +00002494 rgd->rd_dinodes--;
Steven Whitehousecfc8b542008-11-04 10:25:13 +00002495 rgd->rd_free++;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002496
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002497 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002498 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002499 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2500 update_rgrp_lvb_unlinked(rgd, -1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002501
2502 gfs2_statfs_change(sdp, 0, +1, -1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002503}
2504
David Teiglandb3b94fa2006-01-16 16:50:04 +00002505
/**
 * gfs2_free_di - free a dinode block
 * @rgd: the resource group that contains the dinode
 * @ip: the inode
 *
 * Frees the dinode's block in the bitmap, emits a trace event, credits
 * the block back to the owner's quota, and wipes the cached metadata.
 */
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_no_addr);
	trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
	gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
	gfs2_meta_wipe(ip, ip->i_no_addr, 1);
}
2513
2514/**
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002515 * gfs2_check_blk_type - Check the type of a block
2516 * @sdp: The superblock
2517 * @no_addr: The block number to check
2518 * @type: The block type we are looking for
2519 *
2520 * Returns: 0 if the block type matches the expected type
2521 * -ESTALE if it doesn't match
2522 * or -ve errno if something went wrong while checking
2523 */
2524
2525int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2526{
2527 struct gfs2_rgrpd *rgd;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002528 struct gfs2_holder rgd_gh;
Bob Peterson58884c42012-03-05 10:19:35 -05002529 int error = -EINVAL;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002530
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002531 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002532 if (!rgd)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002533 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002534
2535 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2536 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002537 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002538
2539 if (gfs2_get_block_type(rgd, no_addr) != type)
2540 error = -ESTALE;
2541
2542 gfs2_glock_dq_uninit(&rgd_gh);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002543fail:
2544 return error;
2545}
2546
2547/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00002548 * gfs2_rlist_add - add a RG to a list of RGs
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002549 * @ip: the inode
David Teiglandb3b94fa2006-01-16 16:50:04 +00002550 * @rlist: the list of resource groups
2551 * @block: the block
2552 *
2553 * Figure out what RG a block belongs to and add that RG to the list
2554 *
2555 * FIXME: Don't use NOFAIL
2556 *
2557 */
2558
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002559void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
Steven Whitehousecd915492006-09-04 12:49:07 -04002560 u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002561{
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002562 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002563 struct gfs2_rgrpd *rgd;
2564 struct gfs2_rgrpd **tmp;
2565 unsigned int new_space;
2566 unsigned int x;
2567
2568 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2569 return;
2570
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002571 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2572 rgd = ip->i_rgd;
2573 else
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002574 rgd = gfs2_blk2rgrpd(sdp, block, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002575 if (!rgd) {
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002576 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002577 return;
2578 }
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002579 ip->i_rgd = rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002580
2581 for (x = 0; x < rlist->rl_rgrps; x++)
2582 if (rlist->rl_rgd[x] == rgd)
2583 return;
2584
2585 if (rlist->rl_rgrps == rlist->rl_space) {
2586 new_space = rlist->rl_space + 10;
2587
2588 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002589 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002590
2591 if (rlist->rl_rgd) {
2592 memcpy(tmp, rlist->rl_rgd,
2593 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2594 kfree(rlist->rl_rgd);
2595 }
2596
2597 rlist->rl_space = new_space;
2598 rlist->rl_rgd = tmp;
2599 }
2600
2601 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2602}
2603
2604/**
2605 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2606 * and initialize an array of glock holders for them
2607 * @rlist: the list of resource groups
2608 * @state: the lock state to acquire the RG lock in
David Teiglandb3b94fa2006-01-16 16:50:04 +00002609 *
2610 * FIXME: Don't use NOFAIL
2611 *
2612 */
2613
Bob Petersonfe6c9912008-01-28 11:13:02 -06002614void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002615{
2616 unsigned int x;
2617
2618 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002619 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002620 for (x = 0; x < rlist->rl_rgrps; x++)
2621 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
Bob Petersonfe6c9912008-01-28 11:13:02 -06002622 state, 0,
David Teiglandb3b94fa2006-01-16 16:50:04 +00002623 &rlist->rl_ghs[x]);
2624}
2625
2626/**
2627 * gfs2_rlist_free - free a resource group list
Fabian Frederick27ff6a02014-07-02 22:05:27 +02002628 * @rlist: the list of resource groups
David Teiglandb3b94fa2006-01-16 16:50:04 +00002629 *
2630 */
2631
2632void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2633{
2634 unsigned int x;
2635
2636 kfree(rlist->rl_rgd);
2637
2638 if (rlist->rl_ghs) {
2639 for (x = 0; x < rlist->rl_rgrps; x++)
2640 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2641 kfree(rlist->rl_ghs);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002642 rlist->rl_ghs = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002643 }
2644}
2645