/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/prefetch.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/random.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "log.h"
#include "inode.h"
#include "trace_gfs2.h"

#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)

#if BITS_PER_LONG == 32
#define LBITMASK   (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
#define LBITSKIP00 (0x00000000UL)
#else
#define LBITMASK   (0x5555555555555555UL)
#define LBITSKIP55 (0x5555555555555555UL)
#define LBITSKIP00 (0x0000000000000000UL)
#endif

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation. Each block is represented by two
 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

struct gfs2_extent {
	struct gfs2_rbm rbm;
	u32 len;
};

static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
			 const struct gfs2_inode *ip, bool nowrap);


/**
 * gfs2_setbit - Set a bit in the bitmaps
 * @rbm: The position of the bit to set
 * @do_clone: Also set the clone bitmap, if it exists
 * @new_state: the new state of the block
 *
 */

static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	unsigned int buflen = bi->bi_len;
	const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
	end = bi->bi_bh->b_data + bi->bi_offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		pr_warn("buf_blk = 0x%x old_state=%d, new_state=%d\n",
			rbm->offset, cur_state, new_state);
		pr_warn("rgrp=0x%llx bi_start=0x%x\n",
			(unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
		pr_warn("bi_offset=0x%x bi_len=0x%x\n",
			bi->bi_offset, bi->bi_len);
		dump_stack();
		gfs2_consist_rgrpd(rbm->rgd);
		return;
	}
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (do_clone && bi->bi_clone) {
		byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}

/**
 * gfs2_testbit - test a bit in the bitmaps
 * @rbm: The bit to test
 *
 * Returns: The two bit block state of the requested bit
 */

static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
{
	struct gfs2_bitmap *bi = rbm_bi(rbm);
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
	const u8 *byte;
	unsigned int bit;

	byte = buffer + (rbm->offset / GFS2_NBBY);
	bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (*byte >> bit) & GFS2_BIT_MASK;
}

/**
 * gfs2_bit_search
 * @ptr: Pointer to bitmap data
 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
 * @state: The state we are searching for
 *
 * We xor the bitmap data with a pattern which is the bitwise opposite
 * of what we are looking for, this gives rise to a pattern of ones
 * wherever there is a match. Since we have two bits per entry, we
 * take this pattern, shift it down by one place and then AND it with
 * the original. All the even bit positions (0,2,4, etc) then represent
 * successful matches, so we mask with 0x55555..... to remove the unwanted
 * odd bit positions.
 *
 * This allows searching of a whole u64 at once (32 blocks) with a
 * single test (on 64 bit arches).
 */

static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
{
	u64 tmp;
	static const u64 search[] = {
		[0] = 0xffffffffffffffffULL,
		[1] = 0xaaaaaaaaaaaaaaaaULL,
		[2] = 0x5555555555555555ULL,
		[3] = 0x0000000000000000ULL,
	};
	tmp = le64_to_cpu(*ptr) ^ search[state];
	tmp &= (tmp >> 1);
	tmp &= mask;
	return tmp;
}
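/*
 * Worked example (illustrative): to find GFS2_BLKST_UNLINKED (binary 10),
 * search[2] = 0x5555... is xor'd with the bitmap word, turning every "10"
 * field into "11"; "tmp & (tmp >> 1)" then leaves a 1 only in the low bit
 * of each all-ones field, and the 0x5555... mask keeps just those even
 * bit positions, one per matching block.
 */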

/**
 * rs_cmp - multi-block reservation range compare
 * @blk: absolute file system block number of the new reservation
 * @len: number of blocks in the new reservation
 * @rs: existing reservation to compare against
 *
 * returns: 1 if the block range is beyond the reach of the reservation
 *         -1 if the block range is before the start of the reservation
 *          0 if the block range overlaps with the reservation
 */
static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
{
	u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);

	if (blk >= startblk + rs->rs_free)
		return 1;
	if (blk + len - 1 < startblk)
		return -1;
	return 0;
}

/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *       a block in a given allocation state.
 * @buf: the buffer that holds the bitmaps
 * @len: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buf)
 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem. @buf will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures, but
 * headers are always a multiple of 64 bits long so that the buffer is
 * always aligned to a 64 bit boundary.
 *
 * The size of the buffer is in bytes, but it is assumed that it is
 * always ok to read a complete multiple of 64 bits at the end
 * of the block in case the end is not aligned to a natural boundary.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
		       u32 goal, u8 state)
{
	u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
	const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
	const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
	u64 tmp;
	u64 mask = 0x5555555555555555ULL;
	u32 bit;

	/* Mask off bits we don't care about at the start of the search */
	mask <<= spoint;
	tmp = gfs2_bit_search(ptr, mask, state);
	ptr++;
	while(tmp == 0 && ptr < end) {
		tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
		ptr++;
	}
	/* Mask off any bits which are more than len bytes from the start */
	if (ptr == end && (len & (sizeof(u64) - 1)))
		tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
	/* Didn't find anything, so return */
	if (tmp == 0)
		return BFITNOENT;
	ptr--;
	bit = __ffs64(tmp);
	bit /= 2;	/* two bits per entry in the bitmap */
	return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
}

/**
 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
 * @rbm: The rbm with rgd already set correctly
 * @block: The block number (filesystem relative)
 *
 * This sets the bi and offset members of an rbm based on a
 * resource group and a filesystem relative block number. The
 * resource group must be set in the rbm on entry, the bi and
 * offset members will be set by this function.
 *
 * Returns: 0 on success, or an error code
 */

static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
{
	u64 rblock = block - rbm->rgd->rd_data0;

	if (WARN_ON_ONCE(rblock > UINT_MAX))
		return -EINVAL;
	if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
		return -E2BIG;

	rbm->bii = 0;
	rbm->offset = (u32)(rblock);
	/* Check if the block is within the first bitmap block */
	if (rbm->offset < rbm_bi(rbm)->bi_blocks)
		return 0;

	/* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
	rbm->offset += (sizeof(struct gfs2_rgrp) -
			sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
	rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
	return 0;
}

/**
 * gfs2_rbm_incr - increment an rbm structure
 * @rbm: The rbm with rgd already set correctly
 *
 * This function takes an existing rbm structure and increments it to the next
 * viable block offset.
 *
 * Returns: If incrementing the offset would cause the rbm to go past the
 *          end of the rgrp, true is returned, otherwise false.
 *
 */

static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
{
	if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
		rbm->offset++;
		return false;
	}
	if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
		return true;

	rbm->offset = 0;
	rbm->bii++;
	return false;
}

/**
 * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
 * @rbm: Position to search (value/result)
 * @n_unaligned: Number of unaligned blocks to check
 * @len: Decremented for each block found (terminate on zero)
 *
 * Returns: true if a non-free block is encountered
 */

static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
{
	u32 n;
	u8 res;

	for (n = 0; n < n_unaligned; n++) {
		res = gfs2_testbit(rbm);
		if (res != GFS2_BLKST_FREE)
			return true;
		(*len)--;
		if (*len == 0)
			return true;
		if (gfs2_rbm_incr(rbm))
			return true;
	}

	return false;
}

/**
 * gfs2_free_extlen - Return extent length of free blocks
 * @rrbm: Starting position
 * @len: Max length to check
 *
 * Starting at the block specified by the rbm, see how many free blocks
 * there are, not reading more than len blocks ahead. This can be done
 * using memchr_inv when the blocks are byte aligned, but has to be done
 * on a block by block basis in case of unaligned blocks. Also this
 * function can cope with bitmap boundaries (although it must stop on
 * a resource group boundary)
 *
 * Returns: Number of free blocks in the extent
 */

static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
{
	struct gfs2_rbm rbm = *rrbm;
	u32 n_unaligned = rbm.offset & 3;
	u32 size = len;
	u32 bytes;
	u32 chunk_size;
	u8 *ptr, *start, *end;
	u64 block;
	struct gfs2_bitmap *bi;

	if (n_unaligned &&
	    gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
		goto out;

	n_unaligned = len & 3;
	/* Start is now byte aligned */
	while (len > 3) {
		bi = rbm_bi(&rbm);
		start = bi->bi_bh->b_data;
		if (bi->bi_clone)
			start = bi->bi_clone;
		end = start + bi->bi_bh->b_size;
		start += bi->bi_offset;
		BUG_ON(rbm.offset & 3);
		start += (rbm.offset / GFS2_NBBY);
		bytes = min_t(u32, len / GFS2_NBBY, (end - start));
		ptr = memchr_inv(start, 0, bytes);
		chunk_size = ((ptr == NULL) ? bytes : (ptr - start));
		chunk_size *= GFS2_NBBY;
		BUG_ON(len < chunk_size);
		len -= chunk_size;
		block = gfs2_rbm_to_block(&rbm);
		if (gfs2_rbm_from_block(&rbm, block + chunk_size)) {
			n_unaligned = 0;
			break;
		}
		if (ptr) {
			n_unaligned = 3;
			break;
		}
		n_unaligned = len & 3;
	}

	/* Deal with any bits left over at the end */
	if (n_unaligned)
		gfs2_unaligned_extlen(&rbm, n_unaligned, &len);
out:
	return size - len;
}

/**
 * gfs2_bitcount - count the number of bits in a certain state
 * @rgd: the resource group descriptor
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @state: the state of the block we're looking for
 *
 * Returns: The number of bits
 */

static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
			 unsigned int buflen, u8 state)
{
	const u8 *byte = buffer;
	const u8 *end = buffer + buflen;
	const u8 state1 = state << 2;
	const u8 state2 = state << 4;
	const u8 state3 = state << 6;
	u32 count = 0;

	for (; byte < end; byte++) {
		if (((*byte) & 0x03) == state)
			count++;
		if (((*byte) & 0x0C) == state1)
			count++;
		if (((*byte) & 0x30) == state2)
			count++;
		if (((*byte) & 0xC0) == state3)
			count++;
	}

	return count;
}
436
David Teiglandb3b94fa2006-01-16 16:50:04 +0000437/**
438 * gfs2_rgrp_verify - Verify that a resource group is consistent
David Teiglandb3b94fa2006-01-16 16:50:04 +0000439 * @rgd: the rgrp
440 *
441 */
442
443void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
444{
445 struct gfs2_sbd *sdp = rgd->rd_sbd;
446 struct gfs2_bitmap *bi = NULL;
Steven Whitehousebb8d8a62007-06-01 14:11:58 +0100447 u32 length = rgd->rd_length;
Steven Whitehousecd915492006-09-04 12:49:07 -0400448 u32 count[4], tmp;
David Teiglandb3b94fa2006-01-16 16:50:04 +0000449 int buf, x;
450
Steven Whitehousecd915492006-09-04 12:49:07 -0400451 memset(count, 0, 4 * sizeof(u32));
David Teiglandb3b94fa2006-01-16 16:50:04 +0000452
453 /* Count # blocks in each of 4 possible allocation states */
454 for (buf = 0; buf < length; buf++) {
455 bi = rgd->rd_bits + buf;
456 for (x = 0; x < 4; x++)
457 count[x] += gfs2_bitcount(rgd,
458 bi->bi_bh->b_data +
459 bi->bi_offset,
460 bi->bi_len, x);
461 }
462
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000463 if (count[0] != rgd->rd_free) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000464 if (gfs2_consist_rgrpd(rgd))
465 fs_err(sdp, "free data mismatch: %u != %u\n",
Steven Whitehousecfc8b542008-11-04 10:25:13 +0000466 count[0], rgd->rd_free);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000467 return;
468 }
469
Steven Whitehouse73f74942008-11-04 10:32:57 +0000470 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500471 if (count[1] != tmp) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000472 if (gfs2_consist_rgrpd(rgd))
473 fs_err(sdp, "used data mismatch: %u != %u\n",
474 count[1], tmp);
475 return;
476 }
477
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500478 if (count[2] + count[3] != rgd->rd_dinodes) {
David Teiglandb3b94fa2006-01-16 16:50:04 +0000479 if (gfs2_consist_rgrpd(rgd))
480 fs_err(sdp, "used metadata mismatch: %u != %u\n",
Benjamin Marzinski6b946172009-07-10 18:13:26 -0500481 count[2] + count[3], rgd->rd_dinodes);
David Teiglandb3b94fa2006-01-16 16:50:04 +0000482 return;
483 }
484}
485
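/**
 * rgrp_contains_block - check if a filesystem block falls within an rgrp
 * @rgd: the resource group descriptor
 * @block: the filesystem-relative block number
 *
 * Returns: true if @block lies within the data area of @rgd
 */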
static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
{
	u64 first = rgd->rd_data0;
	u64 last = first + rgd->rd_data;
	return first <= block && block < last;
}

/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 * @exact: True if this needs to be an exact match
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
{
	struct rb_node *n, *next;
	struct gfs2_rgrpd *cur;

	spin_lock(&sdp->sd_rindex_spin);
	n = sdp->sd_rindex_tree.rb_node;
	while (n) {
		cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
		next = NULL;
		if (blk < cur->rd_addr)
			next = n->rb_left;
		else if (blk >= cur->rd_data0 + cur->rd_data)
			next = n->rb_right;
		if (next == NULL) {
			spin_unlock(&sdp->sd_rindex_spin);
			if (exact) {
				if (blk < cur->rd_addr)
					return NULL;
				if (blk >= cur->rd_data0 + cur->rd_data)
					return NULL;
			}
			return cur;
		}
		n = next;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}

/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	const struct rb_node *n;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_first(&sdp->sd_rindex_tree);
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}

/**
 * gfs2_rgrpd_get_next - get the next RG
 * @rgd: the resource group descriptor
 *
 * Returns: The next rgrp
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	const struct rb_node *n;

	spin_lock(&sdp->sd_rindex_spin);
	n = rb_next(&rgd->rd_node);
	if (n == NULL)
		n = rb_first(&sdp->sd_rindex_tree);

	if (unlikely(&rgd->rd_node == n)) {
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}

void check_and_update_goal(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL)
		ip->i_goal = ip->i_no_addr;
}

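/**
 * gfs2_free_clones - free the clone bitmaps held by an rgrp
 * @rgd: the resource group descriptor
 *
 * Frees the bi_clone buffer of each bitmap descriptor, if one was allocated.
 */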
void gfs2_free_clones(struct gfs2_rgrpd *rgd)
{
	int x;

	for (x = 0; x < rgd->rd_length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
	}
}

/**
 * gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
 *                   plus a quota allocation data structure, if necessary
 * @ip: the inode for this reservation
 */
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
	return gfs2_qa_alloc(ip);
}

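/**
 * dump_rs - print a multi-block reservation for debugging
 * @seq: the seq_file to print to
 * @rs: the reservation to describe
 */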
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
	gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
		       (unsigned long long)rs->rs_inum,
		       (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
		       rs->rs_rbm.offset, rs->rs_free);
}

/**
 * __rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
static void __rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	if (!gfs2_rs_active(rs))
		return;

	rgd = rs->rs_rbm.rgd;
	trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
	rb_erase(&rs->rs_node, &rgd->rd_rstree);
	RB_CLEAR_NODE(&rs->rs_node);

	if (rs->rs_free) {
		struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);

		/* return reserved blocks to the rgrp */
		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
		/* The rgrp extent failure point is likely not to increase;
		   it will only do so if the freed blocks are somehow
		   contiguous with a span of free blocks that follows. Still,
		   it will force the number to be recalculated later. */
		rgd->rd_extfail_pt += rs->rs_free;
		rs->rs_free = 0;
		clear_bit(GBF_FULL, &bi->bi_flags);
	}
}

/**
 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
 * @rs: The reservation to remove
 *
 */
void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
{
	struct gfs2_rgrpd *rgd;

	rgd = rs->rs_rbm.rgd;
	if (rgd) {
		spin_lock(&rgd->rd_rsspin);
		__rs_deltree(rs);
		spin_unlock(&rgd->rd_rsspin);
	}
}

/**
 * gfs2_rsqa_delete - delete a multi-block reservation and quota allocation
 * @ip: The inode for this reservation
 * @wcount: The inode's write count, or NULL
 *
 */
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
	down_write(&ip->i_rw_mutex);
	if ((wcount == NULL) || (atomic_read(wcount) <= 1)) {
		gfs2_rs_deltree(&ip->i_res);
		BUG_ON(ip->i_res.rs_free);
	}
	up_write(&ip->i_rw_mutex);
	gfs2_qa_delete(ip, wcount);
}

/**
 * return_all_reservations - return all reserved blocks back to the rgrp.
 * @rgd: the rgrp that needs its space back
 *
 * We previously reserved a bunch of blocks for allocation. Now we need to
 * give them back. This leaves the reservation structures intact, but removes
 * all of their corresponding "no-fly zones".
 */
static void return_all_reservations(struct gfs2_rgrpd *rgd)
{
	struct rb_node *n;
	struct gfs2_blkreserv *rs;

	spin_lock(&rgd->rd_rsspin);
	while ((n = rb_first(&rgd->rd_rstree))) {
		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
		__rs_deltree(rs);
	}
	spin_unlock(&rgd->rd_rsspin);
}

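/**
 * gfs2_clear_rgrpd - tear down all resource group structures
 * @sdp: the filesystem
 *
 * Removes every rgrp from the rindex tree, returns its reservations,
 * drops its glock and frees its bitmap descriptors.
 */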
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	struct rb_node *n;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	while ((n = rb_first(&sdp->sd_rindex_tree))) {
		rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
		gl = rgd->rd_gl;

		rb_erase(n, &sdp->sd_rindex_tree);

		if (gl) {
			spin_lock(&gl->gl_lockref.lock);
			gl->gl_object = NULL;
			spin_unlock(&gl->gl_lockref.lock);
			gfs2_glock_add_to_lru(gl);
			gfs2_glock_put(gl);
		}

		gfs2_free_clones(rgd);
		kfree(rgd->rd_bits);
		return_all_reservations(rgd);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}

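/**
 * gfs2_rindex_print - print the rindex fields of an rgrp
 * @rgd: the resource group descriptor
 *
 * Used when reporting resource group inconsistencies.
 */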
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	pr_info("ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	pr_info("ri_length = %u\n", rgd->rd_length);
	pr_info("ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	pr_info("ri_data = %u\n", rgd->rd_data);
	pr_info("ri_bitbytes = %u\n", rgd->rd_bitbytes);
}

/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		bi->bi_flags = 0;
		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
			bi->bi_blocks = bytes * GFS2_NBBY;
		}

		bytes_left -= bytes;
	}

	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	int error, rgrps;

	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
			break;
		error = gfs2_internal_read(ip, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	return total_data;
}

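/**
 * rgd_insert - insert an rgrp into the rindex rb-tree, keyed by address
 * @rgd: the rgrp to insert
 *
 * Returns: 0 on success, or -EEXIST if an rgrp with the same address is
 *          already present in the tree
 */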
static int rgd_insert(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*newn) {
		struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
						  rd_node);

		parent = *newn;
		if (rgd->rd_addr < cur->rd_addr)
			newn = &((*newn)->rb_left);
		else if (rgd->rd_addr > cur->rd_addr)
			newn = &((*newn)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&rgd->rd_node, parent, newn);
	rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
	sdp->sd_rgrps++;
	return 0;
}

/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: Pointer to the rindex inode
 *
 * Returns: 0 on success, > 0 on EOF, error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	struct gfs2_rindex buf;
	int error;
	struct gfs2_rgrpd *rgd;

	if (pos >= i_size_read(&ip->i_inode))
		return 1;

	error = gfs2_internal_read(ip, (char *)&buf, &pos,
				   sizeof(struct gfs2_rindex));

	if (error != sizeof(struct gfs2_rindex))
		return (error == 0) ? 1 : error;

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	rgd->rd_sbd = sdp;
	rgd->rd_addr = be64_to_cpu(buf.ri_addr);
	rgd->rd_length = be32_to_cpu(buf.ri_length);
	rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
	rgd->rd_data = be32_to_cpu(buf.ri_data);
	rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
	spin_lock_init(&rgd->rd_rsspin);

	error = compute_bitstructs(rgd);
	if (error)
		goto fail;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		goto fail;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK;
	rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr +
						  rgd->rd_length) * bsize) - 1;
	rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
	rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
	if (rgd->rd_data > sdp->sd_max_rg_data)
		sdp->sd_max_rg_data = rgd->rd_data;
	spin_lock(&sdp->sd_rindex_spin);
	error = rgd_insert(rgd);
	spin_unlock(&sdp->sd_rindex_spin);
	if (!error)
		return 0;

	error = 0; /* someone else read in the rgrp; free it and ignore it */
	gfs2_glock_put(rgd->rd_gl);

fail:
	kfree(rgd->rd_bits);
	kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	return error;
}

/**
 * set_rgrp_preferences - Run all the rgrps, selecting some we prefer to use
 * @sdp: the GFS2 superblock
 *
 * The purpose of this function is to select a subset of the resource groups
 * and mark them as PREFERRED. We do it in such a way that each node prefers
 * to use a unique set of rgrps to minimize glock contention.
 */
static void set_rgrp_preferences(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd, *first;
	int i;

	/* Skip an initial number of rgrps, based on this node's journal ID.
	   That should start each node out on its own set. */
	rgd = gfs2_rgrpd_get_first(sdp);
	for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++)
		rgd = gfs2_rgrpd_get_next(rgd);
	first = rgd;

	do {
		rgd->rd_flags |= GFS2_RDF_PREFERRED;
		for (i = 0; i < sdp->sd_journals; i++) {
			rgd = gfs2_rgrpd_get_next(rgd);
			if (!rgd || rgd == first)
				break;
		}
	} while (rgd && rgd != first);
}

/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	int error;

	do {
		error = read_rindex_entry(ip);
	} while (error == 0);

	if (error < 0)
		return error;

	set_rgrp_preferences(sdp);

	sdp->sd_rindex_uptodate = 1;
	return 0;
}

/**
 * gfs2_rindex_update - Update the rindex if required
 * @sdp: The GFS2 superblock
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_update(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	struct gfs2_holder ri_gh;
	int error = 0;
	int unlock_required = 0;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		if (!gfs2_glock_is_locked_by_me(gl)) {
			error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
			if (error)
				return error;
			unlock_required = 1;
		}
		if (!sdp->sd_rindex_uptodate)
			error = gfs2_ri_update(ip);
		if (unlock_required)
			gfs2_glock_dq_uninit(&ri_gh);
	}

	return error;
}

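/**
 * gfs2_rgrp_in - populate an in-core rgrp from an on-disk rgrp header
 * @rgd: the resource group descriptor to fill in
 * @buf: the buffer containing the on-disk struct gfs2_rgrp
 */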
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	rg_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= rg_flags;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}

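/**
 * gfs2_rgrp_out - write in-core rgrp fields to an on-disk rgrp header
 * @rgd: the resource group descriptor
 * @buf: the buffer to receive the on-disk struct gfs2_rgrp
 */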
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;

	str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}

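/**
 * gfs2_rgrp_lvb_valid - check that an rgrp's lock value block is up to date
 * @rgd: the resource group descriptor
 *
 * Returns: 1 if the LVB matches the on-disk rgrp header, 0 otherwise
 */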
static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;

	if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
	    rgl->rl_dinodes != str->rg_dinodes ||
	    rgl->rl_igeneration != str->rg_igeneration)
		return 0;
	return 1;
}

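/**
 * gfs2_rgrp_ondisk2lvb - copy on-disk rgrp fields into the lock value block
 * @rgl: the lock value block to fill in
 * @buf: the buffer containing the on-disk struct gfs2_rgrp
 */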
static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
{
	const struct gfs2_rgrp *str = buf;

	rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
	rgl->rl_flags = str->rg_flags;
	rgl->rl_free = str->rg_free;
	rgl->rl_dinodes = str->rg_dinodes;
	rgl->rl_igeneration = str->rg_igeneration;
	rgl->__pad = 0UL;
}

static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
{
	struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
	u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
	rgl->rl_unlinked = cpu_to_be32(unlinked);
}

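/**
 * count_unlinked - count the unlinked inode blocks in an rgrp
 * @rgd: the resource group descriptor
 *
 * Returns: the number of blocks marked GFS2_BLKST_UNLINKED in the bitmaps
 */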
static u32 count_unlinked(struct gfs2_rgrpd *rgd)
{
	struct gfs2_bitmap *bi;
	const u32 length = rgd->rd_length;
	const u8 *buffer = NULL;
	u32 i, goal, count = 0;

	for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
		goal = 0;
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		WARN_ON(!buffer_uptodate(bi->bi_bh));
		while (goal < bi->bi_len * GFS2_NBBY) {
			goal = gfs2_bitfit(buffer, bi->bi_len, goal,
					   GFS2_BLKST_UNLINKED);
			if (goal == BFITNOENT)
				break;
			count++;
			goal++;
		}
	}

	return count;
}


/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
 *
 * Returns: errno
 */

static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	if (rgd->rd_bits[0].bi_bh != NULL)
		return 0;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		for (x = 0; x < length; x++)
			clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
		rgd->rd_free_clone = rgd->rd_free;
		/* max out the rgrp allocation failure point */
		rgd->rd_extfail_pt = rgd->rd_free;
	}
	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
		rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
		gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
				     rgd->rd_bits[0].bi_bh->b_data);
	}
	else if (sdp->sd_args.ar_rgrplvb) {
		if (!gfs2_rgrp_lvb_valid(rgd)){
			gfs2_consist_rgrpd(rgd);
			error = -EIO;
			goto fail;
		}
		if (rgd->rd_rgl->rl_unlinked == 0)
			rgd->rd_flags &= ~GFS2_RDF_CHECK;
	}
	return 0;

fail:
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}

	return error;
}

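/**
 * update_rgrp_lvb - refresh an in-core rgrp from its lock value block
 * @rgd: the resource group descriptor
 *
 * Falls back to reading the rgrp header and bitmaps from disk if the LVB
 * has not been initialised.
 *
 * Returns: errno
 */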
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
{
	u32 rl_flags;

	if (rgd->rd_flags & GFS2_RDF_UPTODATE)
		return 0;

	if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
		return gfs2_rgrp_bh_get(rgd);

	rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
	rl_flags &= ~GFS2_RDF_MASK;
	rgd->rd_flags &= GFS2_RDF_MASK;
	rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
	if (rgd->rd_rgl->rl_unlinked == 0)
		rgd->rd_flags &= ~GFS2_RDF_CHECK;
	rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
	rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
	return 0;
}

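/**
 * gfs2_rgrp_go_lock - read in an rgrp's bitmaps when its glock is acquired
 * @gh: the glock holder for the resource group
 *
 * The read is skipped when GL_SKIP is set and the rgrplvb mount option is
 * in use, in which case the lock value block is relied upon instead.
 *
 * Returns: errno
 */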
1221int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1222{
1223 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1224 struct gfs2_sbd *sdp = rgd->rd_sbd;
1225
1226 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1227 return 0;
Bob Peterson8b127d02014-01-16 08:52:16 -05001228 return gfs2_rgrp_bh_get(rgd);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001229}
1230
David Teiglandb3b94fa2006-01-16 16:50:04 +00001231/**
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001232 * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
1233 * @rgd: The resource group
David Teiglandb3b94fa2006-01-16 16:50:04 +00001234 *
1235 */
1236
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001237void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001238{
Steven Whitehousebb8d8a62007-06-01 14:11:58 +01001239 int x, length = rgd->rd_length;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001240
David Teiglandb3b94fa2006-01-16 16:50:04 +00001241 for (x = 0; x < length; x++) {
1242 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001243 if (bi->bi_bh) {
1244 brelse(bi->bi_bh);
1245 bi->bi_bh = NULL;
1246 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00001247 }
1248
David Teiglandb3b94fa2006-01-16 16:50:04 +00001249}
1250
Bob Peterson39b0f1e2015-06-05 08:38:57 -05001251/**
1252 * gfs2_rgrp_go_unlock - Unlock a rgrp glock
1253 * @gh: The glock holder for the resource group
1254 *
1255 */
1256
1257void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
1258{
1259 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1260 int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
1261 test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
1262
1263 if (rgd && demote_requested)
1264 gfs2_rgrp_brelse(rgd);
1265}
1266
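/*
 * gfs2_rgrp_send_discards - issue discard requests for a bitmap's free blocks
 *
 * Each bitmap byte covers GFS2_NBBY blocks at two bits per block. When @bh
 * is supplied, "diff" below gets one bit (in the 0x55 positions) for every
 * block that is free in @bh but still shown as allocated in the clone, i.e.
 * blocks freed since the clone was taken. When @bh is NULL (the fitrim
 * case), every currently free block is selected. Contiguous runs of at
 * least @minlen blocks are then passed to sb_issue_discard().
 */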
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001267int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
Bob Peterson7c9ca622011-08-31 09:53:19 +01001268 struct buffer_head *bh,
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001269 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
Steven Whitehousef15ab562009-02-09 09:25:01 +00001270{
1271 struct super_block *sb = sdp->sd_vfs;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001272 u64 blk;
Steven Whitehouse64d576b2009-02-12 13:31:58 +00001273 sector_t start = 0;
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001274 sector_t nr_blks = 0;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001275 int rv;
1276 unsigned int x;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001277 u32 trimmed = 0;
1278 u8 diff;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001279
1280 for (x = 0; x < bi->bi_len; x++) {
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001281 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1282 clone += bi->bi_offset;
1283 clone += x;
1284 if (bh) {
1285 const u8 *orig = bh->b_data + bi->bi_offset + x;
1286 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1287 } else {
1288 diff = ~(*clone | (*clone >> 1));
1289 }
Steven Whitehousef15ab562009-02-09 09:25:01 +00001290 diff &= 0x55;
1291 if (diff == 0)
1292 continue;
1293 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
Steven Whitehousef15ab562009-02-09 09:25:01 +00001294		while (diff) {
1295 if (diff & 1) {
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001296 if (nr_blks == 0)
Steven Whitehousef15ab562009-02-09 09:25:01 +00001297 goto start_new_extent;
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001298 if ((start + nr_blks) != blk) {
1299 if (nr_blks >= minlen) {
1300 rv = sb_issue_discard(sb,
1301 start, nr_blks,
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001302 GFP_NOFS, 0);
1303 if (rv)
1304 goto fail;
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001305 trimmed += nr_blks;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001306 }
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001307 nr_blks = 0;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001308start_new_extent:
1309 start = blk;
1310 }
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001311 nr_blks++;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001312 }
1313 diff >>= 2;
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001314 blk++;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001315 }
1316 }
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001317 if (nr_blks >= minlen) {
1318 rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0);
Steven Whitehousef15ab562009-02-09 09:25:01 +00001319 if (rv)
1320 goto fail;
Bob Petersonb2c87ca2013-03-22 10:07:24 -04001321 trimmed += nr_blks;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001322 }
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001323 if (ptrimmed)
1324 *ptrimmed = trimmed;
1325 return 0;
1326
Steven Whitehousef15ab562009-02-09 09:25:01 +00001327fail:
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001328 if (sdp->sd_args.ar_discard)
1329 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
Steven Whitehousef15ab562009-02-09 09:25:01 +00001330 sdp->sd_args.ar_discard = 0;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001331 return -EIO;
1332}
1333
1334/**
1335 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1336 * @filp: Any file on the filesystem
1337 * @argp: Pointer to the arguments (also used to pass result)
1338 *
1339 * Returns: 0 on success, otherwise error code
1340 */
1341
1342int gfs2_fitrim(struct file *filp, void __user *argp)
1343{
Al Viro496ad9a2013-01-23 17:07:38 -05001344 struct inode *inode = file_inode(filp);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001345 struct gfs2_sbd *sdp = GFS2_SB(inode);
1346 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1347 struct buffer_head *bh;
1348 struct gfs2_rgrpd *rgd;
1349 struct gfs2_rgrpd *rgd_end;
1350 struct gfs2_holder gh;
1351 struct fstrim_range r;
1352 int ret = 0;
1353 u64 amt;
1354 u64 trimmed = 0;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001355 u64 start, end, minlen;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001356 unsigned int x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001357 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001358
1359 if (!capable(CAP_SYS_ADMIN))
1360 return -EPERM;
1361
1362 if (!blk_queue_discard(q))
1363 return -EOPNOTSUPP;
1364
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001365 if (copy_from_user(&r, argp, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001366 return -EFAULT;
1367
Bob Peterson5e2f7d62012-04-04 22:11:16 -04001368 ret = gfs2_rindex_update(sdp);
1369 if (ret)
1370 return ret;
1371
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001372 start = r.start >> bs_shift;
1373 end = start + (r.len >> bs_shift);
1374 minlen = max_t(u64, r.minlen,
1375 q->limits.discard_granularity) >> bs_shift;
1376
Abhijith Das6a98c332013-06-19 17:03:29 -04001377 if (end <= start || minlen > sdp->sd_max_rg_data)
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001378 return -EINVAL;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001379
Abhijith Das6a98c332013-06-19 17:03:29 -04001380 rgd = gfs2_blk2rgrpd(sdp, start, 0);
1381 rgd_end = gfs2_blk2rgrpd(sdp, end, 0);
1382
1383 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end))
1384 && (start > rgd_end->rd_data0 + rgd_end->rd_data))
1385 return -EINVAL; /* start is beyond the end of the fs */
1386
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001387 while (1) {
1388
1389 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1390 if (ret)
1391 goto out;
1392
1393 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1394 /* Trim each bitmap in the rgrp */
1395 for (x = 0; x < rgd->rd_length; x++) {
1396 struct gfs2_bitmap *bi = rgd->rd_bits + x;
Lukas Czerner076f0fa2012-10-16 11:39:08 +02001397 ret = gfs2_rgrp_send_discards(sdp,
1398 rgd->rd_data0, NULL, bi, minlen,
1399 &amt);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001400 if (ret) {
1401 gfs2_glock_dq_uninit(&gh);
1402 goto out;
1403 }
1404 trimmed += amt;
1405 }
1406
1407 /* Mark rgrp as having been trimmed */
1408 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1409 if (ret == 0) {
1410 bh = rgd->rd_bits[0].bi_bh;
1411 rgd->rd_flags |= GFS2_RGF_TRIMMED;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00001412 gfs2_trans_add_meta(rgd->rd_gl, bh);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001413 gfs2_rgrp_out(rgd, bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001414 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001415 gfs2_trans_end(sdp);
1416 }
1417 }
1418 gfs2_glock_dq_uninit(&gh);
1419
1420 if (rgd == rgd_end)
1421 break;
1422
1423 rgd = gfs2_rgrpd_get_next(rgd);
1424 }
1425
1426out:
Abhijith Das6a98c332013-06-19 17:03:29 -04001427 r.len = trimmed << bs_shift;
Lukas Czerner3a238ad2012-10-16 11:39:07 +02001428 if (copy_to_user(argp, &r, sizeof(r)))
Steven Whitehouse66fc0612012-02-08 12:58:32 +00001429 return -EFAULT;
1430
1431 return ret;
Steven Whitehousef15ab562009-02-09 09:25:01 +00001432}
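/*
 * Illustrative userspace sketch (not part of this kernel source): gfs2_fitrim()
 * above is reached through the generic FITRIM ioctl, so driving it from a
 * program looks roughly like the code below. The helper name
 * trim_gfs2_mount() is invented for the example; FITRIM and struct
 * fstrim_range (fields in bytes) come from <linux/fs.h>.
 */
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int trim_gfs2_mount(const char *mountpoint)
{
	struct fstrim_range range;
	int fd = open(mountpoint, O_RDONLY);

	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.start = 0;		/* byte offset into the filesystem */
	range.len = ULLONG_MAX;		/* trim as much as possible */
	range.minlen = 0;		/* rounded up to the discard granularity above */

	if (ioctl(fd, FITRIM, &range) < 0) {
		close(fd);
		return -1;
	}

	/* On success the kernel writes back the number of bytes trimmed. */
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}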
1433
David Teiglandb3b94fa2006-01-16 16:50:04 +00001434/**
Bob Peterson8e2e0042012-07-19 08:12:40 -04001435 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
Bob Peterson8e2e0042012-07-19 08:12:40 -04001436 * @ip: the inode structure
Bob Peterson8e2e0042012-07-19 08:12:40 -04001437 *
Bob Peterson8e2e0042012-07-19 08:12:40 -04001438 */
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001439static void rs_insert(struct gfs2_inode *ip)
Bob Peterson8e2e0042012-07-19 08:12:40 -04001440{
1441 struct rb_node **newn, *parent = NULL;
1442 int rc;
Bob Petersona097dc7e2015-07-16 08:28:04 -05001443 struct gfs2_blkreserv *rs = &ip->i_res;
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001444 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001445 u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);
1446
1447 BUG_ON(gfs2_rs_active(rs));
Bob Peterson8e2e0042012-07-19 08:12:40 -04001448
1449 spin_lock(&rgd->rd_rsspin);
1450 newn = &rgd->rd_rstree.rb_node;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001451 while (*newn) {
1452 struct gfs2_blkreserv *cur =
1453 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1454
1455 parent = *newn;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001456 rc = rs_cmp(fsblock, rs->rs_free, cur);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001457 if (rc > 0)
1458 newn = &((*newn)->rb_right);
1459 else if (rc < 0)
1460 newn = &((*newn)->rb_left);
1461 else {
1462 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001463 WARN_ON(1);
1464 return;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001465 }
1466 }
1467
Bob Peterson8e2e0042012-07-19 08:12:40 -04001468 rb_link_node(&rs->rs_node, parent, newn);
1469 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1470
Bob Peterson8e2e0042012-07-19 08:12:40 -04001471 /* Do our rgrp accounting for the reservation */
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001472 rgd->rd_reserved += rs->rs_free; /* blocks reserved */
Bob Peterson8e2e0042012-07-19 08:12:40 -04001473 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouse9e733d32012-08-23 15:37:59 +01001474 trace_gfs2_rs(rs, TRACE_RS_INSERT);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001475}
1476
1477/**
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001478 * rg_mblk_search - find a group of multiple free blocks to form a reservation
Bob Peterson8e2e0042012-07-19 08:12:40 -04001479 * @rgd: the resource group descriptor
Bob Peterson8e2e0042012-07-19 08:12:40 -04001480 * @ip: pointer to the inode for which we're reserving blocks
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001481 * @ap: the allocation parameters
Bob Peterson8e2e0042012-07-19 08:12:40 -04001482 *
Bob Peterson8e2e0042012-07-19 08:12:40 -04001483 */
1484
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001485static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001486 const struct gfs2_alloc_parms *ap)
Bob Peterson8e2e0042012-07-19 08:12:40 -04001487{
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001488 struct gfs2_rbm rbm = { .rgd = rgd, };
1489 u64 goal;
Bob Petersona097dc7e2015-07-16 08:28:04 -05001490 struct gfs2_blkreserv *rs = &ip->i_res;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001491 u32 extlen;
1492 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1493 int ret;
Bob Petersonaf21ca82013-05-14 13:04:29 -04001494 struct inode *inode = &ip->i_inode;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001495
Bob Petersonaf21ca82013-05-14 13:04:29 -04001496 if (S_ISDIR(inode->i_mode))
1497 extlen = 1;
1498 else {
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001499 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
Bob Petersonaf21ca82013-05-14 13:04:29 -04001500 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1501 }
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001502 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001503 return;
1504
Bob Peterson8e2e0042012-07-19 08:12:40 -04001505 /* Find bitmap block that contains bits for goal block */
1506 if (rgrp_contains_block(rgd, ip->i_goal))
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001507 goal = ip->i_goal;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001508 else
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001509 goal = rgd->rd_last_alloc + rgd->rd_data0;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001510
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001511 if (WARN_ON(gfs2_rbm_from_block(&rbm, goal)))
1512 return;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001513
Bob Peterson8381e602016-05-02 09:42:49 -05001514 ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001515 if (ret == 0) {
1516 rs->rs_rbm = rbm;
1517 rs->rs_free = extlen;
1518 rs->rs_inum = ip->i_no_addr;
1519 rs_insert(ip);
Bob Peterson13d2eb02012-12-20 13:23:04 -05001520 } else {
1521 if (goal == rgd->rd_last_alloc + rgd->rd_data0)
1522 rgd->rd_last_alloc = 0;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001523 }
Bob Petersonb3e47ca2011-11-21 11:47:08 -05001524}
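/*
 * Reservation sizing above: directories get a single-block reservation,
 * while regular files ask for max(rs_sizehint, ap->target) blocks, clamped
 * between RGRP_RSRV_MINBLKS and the rgrp's unreserved free space
 * (rd_free_clone - rd_reserved). rs_sizehint is a hint maintained by the
 * write paths, so reservations tend to grow with the observed I/O size.
 */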
1525
David Teiglandb3b94fa2006-01-16 16:50:04 +00001526/**
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001527 * gfs2_next_unreserved_block - Return next block that is not reserved
1528 * @rgd: The resource group
1529 * @block: The starting block
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001530 * @length: The required length
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001531 * @ip: Ignore any reservations for this inode
1532 *
1533 * If the block does not appear in any reservation, then return the
1534 * block number unchanged. If it does appear in the reservation, then
1535 * keep looking through the tree of reservations in order to find the
1536 * first block number which is not reserved.
1537 */
1538
1539static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001540 u32 length,
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001541 const struct gfs2_inode *ip)
1542{
1543 struct gfs2_blkreserv *rs;
1544 struct rb_node *n;
1545 int rc;
1546
1547 spin_lock(&rgd->rd_rsspin);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001548 n = rgd->rd_rstree.rb_node;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001549 while (n) {
1550 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001551 rc = rs_cmp(block, length, rs);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001552 if (rc < 0)
1553 n = n->rb_left;
1554 else if (rc > 0)
1555 n = n->rb_right;
1556 else
1557 break;
1558 }
1559
1560 if (n) {
Bob Petersona097dc7e2015-07-16 08:28:04 -05001561 while ((rs_cmp(block, length, rs) == 0) && (&ip->i_res != rs)) {
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001562 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001563 n = n->rb_right;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001564 if (n == NULL)
1565 break;
1566 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1567 }
1568 }
1569
1570 spin_unlock(&rgd->rd_rsspin);
1571 return block;
1572}
1573
1574/**
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001575 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1576 * @rbm: The current position in the resource group
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001577 * @ip: The inode for which we are searching for blocks
1578 * @minext: The minimum extent length
Bob Peterson5ce13432013-11-06 10:55:52 -05001579 * @maxext: A pointer to the maximum extent structure
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001580 *
1581 * This checks the current position in the rgrp to see whether there is
1582 * a reservation covering this block. If not then this function is a
1583 * no-op. If there is, then the position is moved to the end of the
1584 * contiguous reservation(s) so that we are pointing at the first
1585 * non-reserved block.
1586 *
1587 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1588 */
1589
1590static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001591 const struct gfs2_inode *ip,
Bob Peterson5ce13432013-11-06 10:55:52 -05001592 u32 minext,
1593 struct gfs2_extent *maxext)
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001594{
1595 u64 block = gfs2_rbm_to_block(rbm);
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001596 u32 extlen = 1;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001597 u64 nblock;
1598 int ret;
1599
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001600 /*
1601 * If we have a minimum extent length, then skip over any extent
1602 * which is less than the min extent length in size.
1603 */
1604 if (minext) {
1605 extlen = gfs2_free_extlen(rbm, minext);
Bob Peterson5ce13432013-11-06 10:55:52 -05001606 if (extlen <= maxext->len)
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001607 goto fail;
1608 }
1609
1610 /*
1611 * Check the extent which has been found against the reservations
1612 * and skip if parts of it are already reserved
1613 */
1614 nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip);
Bob Peterson5ce13432013-11-06 10:55:52 -05001615 if (nblock == block) {
1616 if (!minext || extlen >= minext)
1617 return 0;
1618
1619 if (extlen > maxext->len) {
1620 maxext->len = extlen;
1621 maxext->rbm = *rbm;
1622 }
Steven Whitehouseff7f4cb2012-09-10 10:03:50 +01001623fail:
Bob Peterson5ce13432013-11-06 10:55:52 -05001624 nblock = block + extlen;
1625 }
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001626 ret = gfs2_rbm_from_block(rbm, nblock);
1627 if (ret < 0)
1628 return ret;
1629 return 1;
1630}
1631
1632/**
1633 * gfs2_rbm_find - Look for blocks of a particular state
1634 * @rbm: Value/result starting position and final position
1635 * @state: The state which we want to find
Bob Peterson5ce13432013-11-06 10:55:52 -05001636 * @minext: Pointer to the requested extent length (NULL for a single block)
1637 * This is updated to be the actual reservation size.
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001638 * @ip: If set, check for reservations
1639 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1640 * around until we've reached the starting point.
1641 *
1642 * Side effects:
1643 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1644 * has no free blocks in it.
Bob Peterson5ea50502013-11-25 11:16:25 +00001645 * - If looking for free blocks, we set rd_extfail_pt on each rgrp which
1646 * has come up short on a free block search.
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001647 *
1648 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1649 */
1650
Bob Peterson5ce13432013-11-06 10:55:52 -05001651static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
Bob Peterson8381e602016-05-02 09:42:49 -05001652 const struct gfs2_inode *ip, bool nowrap)
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001653{
1654 struct buffer_head *bh;
Bob Petersone579ed42013-09-17 13:12:15 -04001655 int initial_bii;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001656 u32 initial_offset;
Bob Peterson5ea50502013-11-25 11:16:25 +00001657 int first_bii = rbm->bii;
1658 u32 first_offset = rbm->offset;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001659 u32 offset;
1660 u8 *buffer;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001661 int n = 0;
1662 int iters = rbm->rgd->rd_length;
1663 int ret;
Bob Petersone579ed42013-09-17 13:12:15 -04001664 struct gfs2_bitmap *bi;
Bob Peterson5ce13432013-11-06 10:55:52 -05001665 struct gfs2_extent maxext = { .rbm.rgd = rbm->rgd, };
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001666
1667 /* If we are not starting at the beginning of a bitmap, then we
1668 * need to add one to the bitmap count to ensure that we search
1669 * the starting bitmap twice.
1670 */
1671 if (rbm->offset != 0)
1672 iters++;
1673
 1674	while (1) {
Bob Petersone579ed42013-09-17 13:12:15 -04001675 bi = rbm_bi(rbm);
1676 if (test_bit(GBF_FULL, &bi->bi_flags) &&
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001677 (state == GFS2_BLKST_FREE))
1678 goto next_bitmap;
1679
Bob Petersone579ed42013-09-17 13:12:15 -04001680 bh = bi->bi_bh;
1681 buffer = bh->b_data + bi->bi_offset;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001682 WARN_ON(!buffer_uptodate(bh));
Bob Petersone579ed42013-09-17 13:12:15 -04001683 if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
1684 buffer = bi->bi_clone + bi->bi_offset;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001685 initial_offset = rbm->offset;
Bob Petersone579ed42013-09-17 13:12:15 -04001686 offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001687 if (offset == BFITNOENT)
1688 goto bitmap_full;
1689 rbm->offset = offset;
1690 if (ip == NULL)
1691 return 0;
1692
Bob Petersone579ed42013-09-17 13:12:15 -04001693 initial_bii = rbm->bii;
Bob Peterson5ce13432013-11-06 10:55:52 -05001694 ret = gfs2_reservation_check_and_update(rbm, ip,
1695 minext ? *minext : 0,
1696 &maxext);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001697 if (ret == 0)
1698 return 0;
1699 if (ret > 0) {
Bob Petersone579ed42013-09-17 13:12:15 -04001700 n += (rbm->bii - initial_bii);
Bob Peterson8d8b7522012-08-07 13:28:17 -04001701 goto next_iter;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001702 }
Steven Whitehouse5d50d532012-08-07 13:47:12 +01001703 if (ret == -E2BIG) {
Bob Petersone579ed42013-09-17 13:12:15 -04001704 rbm->bii = 0;
Steven Whitehouse5d50d532012-08-07 13:47:12 +01001705 rbm->offset = 0;
Bob Petersone579ed42013-09-17 13:12:15 -04001706 n += (rbm->bii - initial_bii);
Steven Whitehouse5d50d532012-08-07 13:47:12 +01001707 goto res_covered_end_of_rgrp;
1708 }
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001709 return ret;
1710
1711bitmap_full: /* Mark bitmap as full and fall through */
Fabian Fredericka3e32132015-05-18 15:23:03 -05001712 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
Bob Petersone579ed42013-09-17 13:12:15 -04001713 set_bit(GBF_FULL, &bi->bi_flags);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001714
1715next_bitmap: /* Find next bitmap in the rgrp */
1716 rbm->offset = 0;
Bob Petersone579ed42013-09-17 13:12:15 -04001717 rbm->bii++;
1718 if (rbm->bii == rbm->rgd->rd_length)
1719 rbm->bii = 0;
Steven Whitehouse5d50d532012-08-07 13:47:12 +01001720res_covered_end_of_rgrp:
Bob Petersone579ed42013-09-17 13:12:15 -04001721 if ((rbm->bii == 0) && nowrap)
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001722 break;
1723 n++;
Bob Peterson8d8b7522012-08-07 13:28:17 -04001724next_iter:
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001725 if (n >= iters)
1726 break;
1727 }
1728
Bob Peterson5ce13432013-11-06 10:55:52 -05001729 if (minext == NULL || state != GFS2_BLKST_FREE)
1730 return -ENOSPC;
1731
Bob Peterson5ea50502013-11-25 11:16:25 +00001732 /* If the extent was too small, and it's smaller than the smallest
1733 to have failed before, remember for future reference that it's
1734 useless to search this rgrp again for this amount or more. */
1735 if ((first_offset == 0) && (first_bii == 0) &&
1736 (*minext < rbm->rgd->rd_extfail_pt))
1737 rbm->rgd->rd_extfail_pt = *minext;
1738
Bob Peterson5ce13432013-11-06 10:55:52 -05001739 /* If the maximum extent we found is big enough to fulfill the
1740 minimum requirements, use it anyway. */
1741 if (maxext.len) {
1742 *rbm = maxext.rbm;
1743 *minext = maxext.len;
1744 return 0;
1745 }
1746
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001747 return -ENOSPC;
1748}
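/*
 * Note on the minext contract above: if no free extent of the requested
 * length is found, the shortfall is remembered in rd_extfail_pt (when the
 * search covered the whole rgrp from the start), and if some smaller free
 * extent was seen, the rbm is left pointing at it with *minext shrunk to
 * the length actually found, so callers such as rg_mblk_search() simply
 * end up with a smaller reservation.
 */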
1749
1750/**
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001751 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1752 * @rgd: The rgrp
Bob Peterson886b1412012-04-11 13:03:52 -04001753 * @last_unlinked: block address of the last dinode we unlinked
1754 * @skip: block address we should explicitly not unlink
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001755 *
Bob Peterson1a0eae82010-04-14 11:58:16 -04001756 * Returns: nothing; any unlinked but unused inodes that are found have
 1757 * their iopen glocks queued to gfs2_delete_workqueue for disposal.
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001758 */
1759
Steven Whitehouse044b9412010-11-03 20:01:07 +00001760static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001761{
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001762 u64 block;
Bob Peterson5f3eae72007-08-08 16:52:09 -05001763 struct gfs2_sbd *sdp = rgd->rd_sbd;
Steven Whitehouse044b9412010-11-03 20:01:07 +00001764 struct gfs2_glock *gl;
1765 struct gfs2_inode *ip;
1766 int error;
1767 int found = 0;
Bob Petersone579ed42013-09-17 13:12:15 -04001768 struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001769
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001770 while (1) {
Bob Peterson5f3eae72007-08-08 16:52:09 -05001771 down_write(&sdp->sd_log_flush_lock);
Bob Peterson5ce13432013-11-06 10:55:52 -05001772 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
Bob Peterson8381e602016-05-02 09:42:49 -05001773 true);
Bob Peterson5f3eae72007-08-08 16:52:09 -05001774 up_write(&sdp->sd_log_flush_lock);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001775 if (error == -ENOSPC)
1776 break;
1777 if (WARN_ON_ONCE(error))
Bob Peterson24c73872007-07-12 16:58:50 -05001778 break;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05001779
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001780 block = gfs2_rbm_to_block(&rbm);
1781 if (gfs2_rbm_from_block(&rbm, block + 1))
1782 break;
1783 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001784 continue;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001785 if (block == skip)
Steven Whitehouse1e19a192009-07-10 21:13:38 +01001786 continue;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01001787 *last_unlinked = block;
Steven Whitehouse044b9412010-11-03 20:01:07 +00001788
Bob Peterson5ea31bc2015-12-04 12:57:00 -06001789 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl);
Steven Whitehouse044b9412010-11-03 20:01:07 +00001790 if (error)
1791 continue;
1792
1793 /* If the inode is already in cache, we can ignore it here
1794 * because the existing inode disposal code will deal with
1795 * it when all refs have gone away. Accessing gl_object like
1796 * this is not safe in general. Here it is ok because we do
1797 * not dereference the pointer, and we only need an approx
1798 * answer to whether it is NULL or not.
1799 */
1800 ip = gl->gl_object;
1801
1802 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1803 gfs2_glock_put(gl);
1804 else
1805 found++;
1806
 1807		/* Limit reclaim to a sensible number of tasks */
Bob Peterson44ad37d2011-03-17 16:19:58 -04001808 if (found > NR_CPUS)
Steven Whitehouse044b9412010-11-03 20:01:07 +00001809 return;
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001810 }
1811
1812 rgd->rd_flags &= ~GFS2_RDF_CHECK;
Steven Whitehouse044b9412010-11-03 20:01:07 +00001813 return;
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001814}
1815
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001816/**
1817 * gfs2_rgrp_congested - Use stats to figure out whether an rgrp is congested
1818 * @rgd: The rgrp in question
1819 * @loops: An indication of how picky we can be (0=very, 1=less so)
1820 *
1821 * This function uses the recently added glock statistics in order to
1822 * figure out whether a parciular resource group is suffering from
1823 * contention from multiple nodes. This is done purely on the basis
1824 * of timings, since this is the only data we have to work with and
1825 * our aim here is to reject a resource group which is highly contended
1826 * but (very important) not to do this too often in order to ensure that
1827 * we do not land up introducing fragmentation by changing resource
1828 * groups when not actually required.
1829 *
 1830 * The calculation is fairly simple: we want to know whether the SRTTB
1831 * (i.e. smoothed round trip time for blocking operations) to acquire
1832 * the lock for this rgrp's glock is significantly greater than the
1833 * time taken for resource groups on average. We introduce a margin in
1834 * the form of the variable @var which is computed as the sum of the two
 1835 * respective variances, and multiplied by a factor depending on @loops
1836 * and whether we have a lot of data to base the decision on. This is
1837 * then tested against the square difference of the means in order to
1838 * decide whether the result is statistically significant or not.
1839 *
1840 * Returns: A boolean verdict on the congestion status
1841 */
1842
1843static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
1844{
1845 const struct gfs2_glock *gl = rgd->rd_gl;
Bob Peterson15562c42015-03-16 11:52:05 -05001846 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001847 struct gfs2_lkstats *st;
Ben Hutchings4d207132015-08-27 12:51:45 -05001848 u64 r_dcount, l_dcount;
1849 u64 l_srttb, a_srttb = 0;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001850 s64 srttb_diff;
Ben Hutchings4d207132015-08-27 12:51:45 -05001851 u64 sqr_diff;
1852 u64 var;
Bob Peterson0166b192015-04-22 11:24:12 -05001853 int cpu, nonzero = 0;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001854
1855 preempt_disable();
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001856 for_each_present_cpu(cpu) {
1857 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP];
Bob Peterson0166b192015-04-22 11:24:12 -05001858 if (st->stats[GFS2_LKS_SRTTB]) {
1859 a_srttb += st->stats[GFS2_LKS_SRTTB];
1860 nonzero++;
1861 }
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001862 }
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001863 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP];
Bob Peterson0166b192015-04-22 11:24:12 -05001864 if (nonzero)
1865 do_div(a_srttb, nonzero);
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001866 r_dcount = st->stats[GFS2_LKS_DCOUNT];
1867 var = st->stats[GFS2_LKS_SRTTVARB] +
1868 gl->gl_stats.stats[GFS2_LKS_SRTTVARB];
1869 preempt_enable();
1870
1871 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB];
1872 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT];
1873
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001874 if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0))
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001875 return false;
1876
Bob Petersonf4a3ae92014-11-19 12:27:11 -06001877 srttb_diff = a_srttb - l_srttb;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001878 sqr_diff = srttb_diff * srttb_diff;
1879
1880 var *= 2;
1881 if (l_dcount < 8 || r_dcount < 8)
1882 var *= 2;
1883 if (loops == 1)
1884 var *= 2;
1885
1886 return ((srttb_diff < 0) && (sqr_diff > var));
1887}
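/*
 * Worked example for the test above (made-up numbers): if the filesystem-wide
 * average rgrp SRTTB works out at a_srttb = 1000 and this rgrp's glock shows
 * l_srttb = 1600, then srttb_diff = -600 and sqr_diff = 360000. With a
 * combined variance of var = 40000, doubled to 80000 (and doubled again only
 * for small sample counts or on loop 1), 360000 > 80000, so the difference
 * counts as significant and the rgrp is reported congested.
 */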
1888
1889/**
1890 * gfs2_rgrp_used_recently
1891 * @rs: The block reservation with the rgrp to test
1892 * @msecs: The time limit in milliseconds
1893 *
1894 * Returns: True if the rgrp glock has been used within the time limit
1895 */
1896static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs,
1897 u64 msecs)
1898{
1899 u64 tdiff;
1900
1901 tdiff = ktime_to_ns(ktime_sub(ktime_get_real(),
1902 rs->rs_rbm.rgd->rd_gl->gl_dstamp));
1903
1904 return tdiff > (msecs * 1000 * 1000);
1905}
1906
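/*
 * gfs2_orlov_skip - pick a pseudo-random number of rgrps to skip
 *
 * Used by gfs2_inplace_reserve() when the caller passes GFS2_AF_ORLOV for a
 * directory, so that the resource group search starts at a random offset and
 * directory allocations get spread across the filesystem, in the spirit of
 * the Orlov allocator.
 */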
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001907static u32 gfs2_orlov_skip(const struct gfs2_inode *ip)
1908{
1909 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1910 u32 skip;
1911
1912 get_random_bytes(&skip, sizeof(skip));
1913 return skip % sdp->sd_rgrps;
1914}
1915
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001916static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin)
1917{
1918 struct gfs2_rgrpd *rgd = *pos;
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001919 struct gfs2_sbd *sdp = rgd->rd_sbd;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001920
1921 rgd = gfs2_rgrpd_get_next(rgd);
1922 if (rgd == NULL)
Steven Whitehouseaa8920c2012-11-13 14:50:35 +00001923 rgd = gfs2_rgrpd_get_first(sdp);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001924 *pos = rgd;
1925 if (rgd != begin) /* If we didn't wrap */
1926 return true;
1927 return false;
1928}
1929
Steven Whitehousec8cdf472007-06-08 10:05:33 +01001930/**
Bob Peterson0e27c182014-10-29 08:02:28 -05001931 * fast_to_acquire - determine if a resource group will be fast to acquire
1932 *
1933 * If this is one of our preferred rgrps, it should be quicker to acquire,
1934 * because we tried to set ourselves up as dlm lock master.
1935 */
1936static inline int fast_to_acquire(struct gfs2_rgrpd *rgd)
1937{
1938 struct gfs2_glock *gl = rgd->rd_gl;
1939
1940 if (gl->gl_state != LM_ST_UNLOCKED && list_empty(&gl->gl_holders) &&
1941 !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
1942 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1943 return 1;
1944 if (rgd->rd_flags & GFS2_RDF_PREFERRED)
1945 return 1;
1946 return 0;
1947}
1948
1949/**
Bob Peterson666d1d82012-06-13 23:03:56 -04001950 * gfs2_inplace_reserve - Reserve space in the filesystem
David Teiglandb3b94fa2006-01-16 16:50:04 +00001951 * @ip: the inode to reserve space for
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001952 * @ap: the allocation parameters
David Teiglandb3b94fa2006-01-16 16:50:04 +00001953 *
Abhi Das25435e52015-03-18 12:04:37 -05001954 * We try our best to find an rgrp that has at least ap->target blocks
1955 * available. After a couple of passes (loops == 2), the prospects of finding
1956 * such an rgrp diminish. At this stage, we return the first rgrp that has
 1957 * at least ap->min_target blocks available. Either way, we set ap->allowed to
1958 * the number of blocks available in the chosen rgrp.
1959 *
1960 * Returns: 0 on success,
 1961 * -ENOSPC if a suitable rgrp can't be found
1962 * errno otherwise
David Teiglandb3b94fa2006-01-16 16:50:04 +00001963 */
1964
Abhi Das25435e52015-03-18 12:04:37 -05001965int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
David Teiglandb3b94fa2006-01-16 16:50:04 +00001966{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04001967 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001968 struct gfs2_rgrpd *begin = NULL;
Bob Petersona097dc7e2015-07-16 08:28:04 -05001969 struct gfs2_blkreserv *rs = &ip->i_res;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00001970 int error = 0, rg_locked, flags = 0;
Bob Peterson666d1d82012-06-13 23:03:56 -04001971 u64 last_unlinked = NO_BLOCK;
Bob Peterson7c9ca622011-08-31 09:53:19 +01001972 int loops = 0;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001973 u32 skip = 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00001974
Benjamin Marzinski90306c42012-05-29 23:01:09 -05001975 if (sdp->sd_args.ar_rgrplvb)
1976 flags |= GL_SKIP;
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001977 if (gfs2_assert_warn(sdp, ap->target))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001978 return -EINVAL;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001979 if (gfs2_rs_active(rs)) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001980 begin = rs->rs_rbm.rgd;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001981 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001982 rs->rs_rbm.rgd = begin = ip->i_rgd;
Bob Peterson8e2e0042012-07-19 08:12:40 -04001983 } else {
Abhi Das00a158b2014-09-18 21:40:28 -05001984 check_and_update_goal(ip);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001985 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
Bob Peterson8e2e0042012-07-19 08:12:40 -04001986 }
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01001987 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001988 skip = gfs2_orlov_skip(ip);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01001989 if (rs->rs_rbm.rgd == NULL)
Bob Peterson7c9ca622011-08-31 09:53:19 +01001990 return -EBADSLT;
1991
1992 while (loops < 3) {
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001993 rg_locked = 1;
Abhijith Das292c8c12007-11-29 14:13:54 -06001994
Steven Whitehousec743ffd2012-08-25 18:21:47 +01001995 if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
1996 rg_locked = 0;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00001997 if (skip && skip--)
1998 goto next_rgrp;
Bob Peterson0e27c182014-10-29 08:02:28 -05001999 if (!gfs2_rs_active(rs)) {
2000 if (loops == 0 &&
2001 !fast_to_acquire(rs->rs_rbm.rgd))
2002 goto next_rgrp;
2003 if ((loops < 2) &&
2004 gfs2_rgrp_used_recently(rs, 1000) &&
2005 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2006 goto next_rgrp;
2007 }
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002008 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
Bob Peterson8e2e0042012-07-19 08:12:40 -04002009 LM_ST_EXCLUSIVE, flags,
2010 &rs->rs_rgd_gh);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002011 if (unlikely(error))
2012 return error;
Steven Whitehousebcd97c02012-10-31 09:58:42 +00002013 if (!gfs2_rs_active(rs) && (loops < 2) &&
2014 gfs2_rgrp_congested(rs->rs_rbm.rgd, loops))
2015 goto skip_rgrp;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002016 if (sdp->sd_args.ar_rgrplvb) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002017 error = update_rgrp_lvb(rs->rs_rbm.rgd);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002018 if (unlikely(error)) {
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002019 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2020 return error;
2021 }
2022 }
Abhijith Das292c8c12007-11-29 14:13:54 -06002023 }
Bob Peterson666d1d82012-06-13 23:03:56 -04002024
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002025		/* Skip unusable resource groups */
Bob Peterson5ea50502013-11-25 11:16:25 +00002026 if ((rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC |
2027 GFS2_RDF_ERROR)) ||
Abhi Das25435e52015-03-18 12:04:37 -05002028 (loops == 0 && ap->target > rs->rs_rbm.rgd->rd_extfail_pt))
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002029 goto skip_rgrp;
2030
2031 if (sdp->sd_args.ar_rgrplvb)
2032 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
2033
2034 /* Get a reservation if we don't already have one */
2035 if (!gfs2_rs_active(rs))
Steven Whitehouse7b9cff42013-10-02 11:13:25 +01002036 rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002037
2038 /* Skip rgrps when we can't get a reservation on first pass */
2039 if (!gfs2_rs_active(rs) && (loops < 1))
2040 goto check_rgrp;
2041
2042 /* If rgrp has enough free space, use it */
Abhi Das25435e52015-03-18 12:04:37 -05002043 if (rs->rs_rbm.rgd->rd_free_clone >= ap->target ||
2044 (loops == 2 && ap->min_target &&
2045 rs->rs_rbm.rgd->rd_free_clone >= ap->min_target)) {
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002046 ip->i_rgd = rs->rs_rbm.rgd;
Abhi Das25435e52015-03-18 12:04:37 -05002047 ap->allowed = ip->i_rgd->rd_free_clone;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002048 return 0;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002049 }
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002050check_rgrp:
2051 /* Check for unlinked inodes which can be reclaimed */
2052 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
2053 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
2054 ip->i_no_addr);
2055skip_rgrp:
Bob Peterson1330edb2013-11-06 10:58:00 -05002056 /* Drop reservation, if we couldn't use reserved rgrp */
2057 if (gfs2_rs_active(rs))
2058 gfs2_rs_deltree(rs);
2059
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002060 /* Unlock rgrp if required */
2061 if (!rg_locked)
2062 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
2063next_rgrp:
2064 /* Find the next rgrp, and continue looking */
2065 if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin))
2066 continue;
Steven Whitehouse9dbe9612012-10-31 10:37:10 +00002067 if (skip)
2068 continue;
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002069
2070 /* If we've scanned all the rgrps, but found no free blocks
2071 * then this checks for some less likely conditions before
2072 * trying again.
2073 */
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002074 loops++;
2075 /* Check that fs hasn't grown if writing to rindex */
2076 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
2077 error = gfs2_ri_update(ip);
2078 if (error)
2079 return error;
2080 }
2081 /* Flushing the log may release space */
2082 if (loops == 2)
Benjamin Marzinski24972552014-05-01 22:26:55 -05002083 gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002084 }
2085
2086 return -ENOSPC;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002087}
2088
2089/**
2090 * gfs2_inplace_release - release an inplace reservation
2091 * @ip: the inode the reservation was taken out on
2092 *
2093 * Release a reservation made by gfs2_inplace_reserve().
2094 */
2095
2096void gfs2_inplace_release(struct gfs2_inode *ip)
2097{
Bob Petersona097dc7e2015-07-16 08:28:04 -05002098 struct gfs2_blkreserv *rs = &ip->i_res;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002099
Bob Peterson564e12b2011-11-21 13:36:17 -05002100 if (rs->rs_rgd_gh.gh_gl)
2101 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002102}
2103
2104/**
2105 * gfs2_get_block_type - Check a block in a RG is of given type
2106 * @rgd: the resource group holding the block
2107 * @block: the block number
2108 *
2109 * Returns: The block type (GFS2_BLKST_*)
2110 */
2111
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002112static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002113{
Steven Whitehouse39839032012-08-03 11:10:30 +01002114 struct gfs2_rbm rbm = { .rgd = rgd, };
2115 int ret;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002116
Steven Whitehouse39839032012-08-03 11:10:30 +01002117 ret = gfs2_rbm_from_block(&rbm, block);
2118 WARN_ON_ONCE(ret != 0);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002119
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002120 return gfs2_testbit(&rbm);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002121}
2122
David Teiglandb3b94fa2006-01-16 16:50:04 +00002123
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002124/**
2125 * gfs2_alloc_extent - allocate an extent from a given bitmap
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002126 * @rbm: the resource group information
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002127 * @dinode: TRUE if the first block we allocate is for a dinode
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002128 * @n: The extent length (value/result)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002129 *
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002130 * Add the bitmap buffer to the transaction.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002131 * Set the found bits to @new_state to change block's allocation state.
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002132 */
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002133static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002134 unsigned int *n)
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002135{
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002136 struct gfs2_rbm pos = { .rgd = rbm->rgd, };
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002137 const unsigned int elen = *n;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002138 u64 block;
2139 int ret;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002140
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002141 *n = 1;
2142 block = gfs2_rbm_to_block(rbm);
Bob Petersone579ed42013-09-17 13:12:15 -04002143 gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002144 gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002145 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002146 while (*n < elen) {
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002147 ret = gfs2_rbm_from_block(&pos, block);
Bob Peterson0688a5e2012-08-28 08:45:56 -04002148 if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002149 break;
Bob Petersone579ed42013-09-17 13:12:15 -04002150 gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002151 gfs2_setbit(&pos, true, GFS2_BLKST_USED);
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002152 (*n)++;
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002153 block++;
Steven Whitehouse60a0b8f2009-05-21 12:23:12 +01002154 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002155}
2156
2157/**
2158 * rgblk_free - Change alloc state of given block(s)
2159 * @sdp: the filesystem
2160 * @bstart: the start of a run of blocks to free
2161 * @blen: the length of the block run (all must lie within ONE RG!)
2162 * @new_state: GFS2_BLKST_XXX the after-allocation block state
2163 *
2164 * Returns: Resource group containing the block(s)
2165 */
2166
Steven Whitehousecd915492006-09-04 12:49:07 -04002167static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
2168 u32 blen, unsigned char new_state)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002169{
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002170 struct gfs2_rbm rbm;
Bob Petersond24e0562014-10-03 08:38:06 -04002171 struct gfs2_bitmap *bi, *bi_prev = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002172
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002173 rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
2174 if (!rbm.rgd) {
David Teiglandb3b94fa2006-01-16 16:50:04 +00002175 if (gfs2_consist(sdp))
Steven Whitehouse382066d2006-05-24 10:22:09 -04002176 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002177 return NULL;
2178 }
2179
Bob Petersond24e0562014-10-03 08:38:06 -04002180 gfs2_rbm_from_block(&rbm, bstart);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002181 while (blen--) {
Bob Petersone579ed42013-09-17 13:12:15 -04002182 bi = rbm_bi(&rbm);
Bob Petersond24e0562014-10-03 08:38:06 -04002183 if (bi != bi_prev) {
2184 if (!bi->bi_clone) {
2185 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
2186 GFP_NOFS | __GFP_NOFAIL);
2187 memcpy(bi->bi_clone + bi->bi_offset,
2188 bi->bi_bh->b_data + bi->bi_offset,
2189 bi->bi_len);
2190 }
2191 gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
2192 bi_prev = bi;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002193 }
Steven Whitehouse3e6339d2012-08-13 11:37:51 +01002194 gfs2_setbit(&rbm, false, new_state);
Bob Petersond24e0562014-10-03 08:38:06 -04002195 gfs2_rbm_incr(&rbm);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002196 }
2197
Steven Whitehouse3b1d0b92012-08-03 11:23:28 +01002198 return rbm.rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002199}
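/*
 * Note on bi_clone above: the first time a bitmap buffer is modified here, a
 * copy of its data is made and only the live buffer is updated (gfs2_setbit()
 * is told not to touch the clone). Allocation searches in gfs2_rbm_find()
 * read the clone when it exists, so blocks freed in the current transaction
 * are not handed out again until the clone is discarded after the relevant
 * log flush.
 */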
2200
2201/**
Steven Whitehouse09010972009-05-20 10:48:47 +01002202 * gfs2_rgrp_dump - print out an rgrp
2203 * @seq: The iterator
2204 * @gl: The glock in question
David Teiglandb3b94fa2006-01-16 16:50:04 +00002205 *
David Teiglandb3b94fa2006-01-16 16:50:04 +00002206 */
2207
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002208void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
Steven Whitehouse09010972009-05-20 10:48:47 +01002209{
Bob Peterson8e2e0042012-07-19 08:12:40 -04002210 struct gfs2_rgrpd *rgd = gl->gl_object;
2211 struct gfs2_blkreserv *trs;
2212 const struct rb_node *n;
2213
Steven Whitehouse09010972009-05-20 10:48:47 +01002214 if (rgd == NULL)
Steven Whitehouseac3beb62014-01-16 10:31:13 +00002215 return;
Bob Peterson5ea50502013-11-25 11:16:25 +00002216 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u e:%u\n",
Steven Whitehouse09010972009-05-20 10:48:47 +01002217 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
Bob Peterson8e2e0042012-07-19 08:12:40 -04002218 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
Bob Peterson5ea50502013-11-25 11:16:25 +00002219 rgd->rd_reserved, rgd->rd_extfail_pt);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002220 spin_lock(&rgd->rd_rsspin);
2221 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
2222 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
2223 dump_rs(seq, trs);
2224 }
2225 spin_unlock(&rgd->rd_rsspin);
Steven Whitehouse09010972009-05-20 10:48:47 +01002226}
2227
Steven Whitehouse6050b9c2009-07-31 16:19:40 +01002228static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
2229{
2230 struct gfs2_sbd *sdp = rgd->rd_sbd;
2231 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
Steven Whitehouse86d00632009-09-14 09:50:57 +01002232 (unsigned long long)rgd->rd_addr);
Steven Whitehouse6050b9c2009-07-31 16:19:40 +01002233 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
2234 gfs2_rgrp_dump(NULL, rgd->rd_gl);
2235 rgd->rd_flags |= GFS2_RDF_ERROR;
2236}
2237
Steven Whitehouse09010972009-05-20 10:48:47 +01002238/**
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002239 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
2240 * @ip: The inode we have just allocated blocks for
2241 * @rbm: The start of the allocated blocks
2242 * @len: The extent length
Bob Peterson8e2e0042012-07-19 08:12:40 -04002243 *
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002244 * Adjusts a reservation after an allocation has taken place. If the
2245 * reservation does not match the allocation, or if it is now empty
2246 * then it is removed.
Bob Peterson8e2e0042012-07-19 08:12:40 -04002247 */
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002248
2249static void gfs2_adjust_reservation(struct gfs2_inode *ip,
2250 const struct gfs2_rbm *rbm, unsigned len)
Bob Peterson8e2e0042012-07-19 08:12:40 -04002251{
Bob Petersona097dc7e2015-07-16 08:28:04 -05002252 struct gfs2_blkreserv *rs = &ip->i_res;
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002253 struct gfs2_rgrpd *rgd = rbm->rgd;
2254 unsigned rlen;
2255 u64 block;
2256 int ret;
Bob Peterson8e2e0042012-07-19 08:12:40 -04002257
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002258 spin_lock(&rgd->rd_rsspin);
2259 if (gfs2_rs_active(rs)) {
2260 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
2261 block = gfs2_rbm_to_block(rbm);
2262 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
2263 rlen = min(rs->rs_free, len);
2264 rs->rs_free -= rlen;
2265 rgd->rd_reserved -= rlen;
Steven Whitehouse9e733d32012-08-23 15:37:59 +01002266 trace_gfs2_rs(rs, TRACE_RS_CLAIM);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002267 if (rs->rs_free && !ret)
2268 goto out;
Bob Peterson1a855032014-10-29 08:02:30 -05002269 /* We used up our block reservation, so we should
2270 reserve more blocks next time. */
2271 atomic_add(RGRP_RSRV_ADDBLKS, &rs->rs_sizehint);
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002272 }
Bob Peterson20095212013-03-13 10:26:38 -04002273 __rs_deltree(rs);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002274 }
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002275out:
2276 spin_unlock(&rgd->rd_rsspin);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002277}
2278
2279/**
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002280 * gfs2_set_alloc_start - Set starting point for block allocation
2281 * @rbm: The rbm which will be set to the required location
2282 * @ip: The gfs2 inode
2283 * @dinode: Flag to say if allocation includes a new inode
2284 *
2285 * This sets the starting point from the reservation if one is active
2286 * otherwise it falls back to guessing a start point based on the
2287 * inode's goal block or the last allocation point in the rgrp.
2288 */
2289
2290static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
2291 const struct gfs2_inode *ip, bool dinode)
2292{
2293 u64 goal;
2294
Bob Petersona097dc7e2015-07-16 08:28:04 -05002295 if (gfs2_rs_active(&ip->i_res)) {
2296 *rbm = ip->i_res.rs_rbm;
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002297 return;
2298 }
2299
2300 if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
2301 goal = ip->i_goal;
2302 else
2303 goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
2304
2305 gfs2_rbm_from_block(rbm, goal);
2306}
2307
2308/**
Bob Peterson6e87ed02011-11-18 10:58:32 -05002309 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
Steven Whitehouse09010972009-05-20 10:48:47 +01002310 * @ip: the inode to allocate the block for
2311 * @bn: Used to return the starting block number
Bob Peterson8e2e0042012-07-19 08:12:40 -04002312 * @nblocks: requested number of blocks/extent length (value/result)
Bob Peterson6e87ed02011-11-18 10:58:32 -05002313 * @dinode: 1 if we're allocating a dinode block, else 0
Bob Peterson3c5d7852011-11-14 11:17:08 -05002314 * @generation: the generation number of the inode
Steven Whitehouse09010972009-05-20 10:48:47 +01002315 *
2316 * Returns: 0 or error
2317 */
2318
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002319int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
Bob Peterson6e87ed02011-11-18 10:58:32 -05002320 bool dinode, u64 *generation)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002321{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002322 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
Steven Whitehoused9ba7612009-04-23 08:59:41 +01002323 struct buffer_head *dibh;
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002324 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002325 unsigned int ndata;
Bob Peterson3c5d7852011-11-14 11:17:08 -05002326 u64 block; /* block, within the file system scope */
Steven Whitehoused9ba7612009-04-23 08:59:41 +01002327 int error;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002328
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002329 gfs2_set_alloc_start(&rbm, ip, dinode);
Bob Peterson8381e602016-05-02 09:42:49 -05002330 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false);
Steven Whitehouse62e252e2012-07-30 11:06:08 +01002331
Steven Whitehouse137834a2012-08-23 13:43:40 +01002332 if (error == -ENOSPC) {
Steven Whitehouse9e07f2c2013-10-02 14:42:45 +01002333 gfs2_set_alloc_start(&rbm, ip, dinode);
Bob Peterson8381e602016-05-02 09:42:49 -05002334 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false);
Steven Whitehouse137834a2012-08-23 13:43:40 +01002335 }
2336
Steven Whitehouse62e252e2012-07-30 11:06:08 +01002337 /* Since all blocks are reserved in advance, this shouldn't happen */
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002338 if (error) {
Bob Peterson5ea50502013-11-25 11:16:25 +00002339 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n",
Steven Whitehouse9e733d32012-08-23 15:37:59 +01002340 (unsigned long long)ip->i_no_addr, error, *nblocks,
Bob Peterson5ea50502013-11-25 11:16:25 +00002341 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags),
2342 rbm.rgd->rd_extfail_pt);
Steven Whitehouse62e252e2012-07-30 11:06:08 +01002343 goto rgrp_error;
2344 }
2345
Steven Whitehousec04a2ef2012-08-13 11:14:57 +01002346 gfs2_alloc_extent(&rbm, dinode, nblocks);
2347 block = gfs2_rbm_to_block(&rbm);
Steven Whitehousec743ffd2012-08-25 18:21:47 +01002348 rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0;
Bob Petersona097dc7e2015-07-16 08:28:04 -05002349 if (gfs2_rs_active(&ip->i_res))
Steven Whitehouse5b924ae2012-08-01 20:35:05 +01002350 gfs2_adjust_reservation(ip, &rbm, *nblocks);
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002351 ndata = *nblocks;
2352 if (dinode)
2353 ndata--;
Bob Petersonb3e47ca2011-11-21 11:47:08 -05002354
Bob Peterson3c5d7852011-11-14 11:17:08 -05002355 if (!dinode) {
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002356 ip->i_goal = block + ndata - 1;
Bob Peterson3c5d7852011-11-14 11:17:08 -05002357 error = gfs2_meta_inode_buffer(ip, &dibh);
2358 if (error == 0) {
2359 struct gfs2_dinode *di =
2360 (struct gfs2_dinode *)dibh->b_data;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002361 gfs2_trans_add_meta(ip->i_gl, dibh);
Bob Peterson3c5d7852011-11-14 11:17:08 -05002362 di->di_goal_meta = di->di_goal_data =
2363 cpu_to_be64(ip->i_goal);
2364 brelse(dibh);
2365 }
Steven Whitehoused9ba7612009-04-23 08:59:41 +01002366 }
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002367 if (rbm.rgd->rd_free < *nblocks) {
Fabian Frederickfc554ed2014-03-05 22:06:42 +08002368 pr_warn("nblocks=%u\n", *nblocks);
Steven Whitehouse09010972009-05-20 10:48:47 +01002369 goto rgrp_error;
Bob Peterson8e2e0042012-07-19 08:12:40 -04002370 }
Steven Whitehouse09010972009-05-20 10:48:47 +01002371
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002372 rbm.rgd->rd_free -= *nblocks;
Bob Peterson3c5d7852011-11-14 11:17:08 -05002373 if (dinode) {
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002374 rbm.rgd->rd_dinodes++;
2375 *generation = rbm.rgd->rd_igeneration++;
Bob Peterson3c5d7852011-11-14 11:17:08 -05002376 if (*generation == 0)
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002377 *generation = rbm.rgd->rd_igeneration++;
Bob Peterson3c5d7852011-11-14 11:17:08 -05002378 }
David Teiglandb3b94fa2006-01-16 16:50:04 +00002379
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002380 gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002381 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2382 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002383
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002384 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
Bob Peterson3c5d7852011-11-14 11:17:08 -05002385 if (dinode)
Steven Whitehouseb2c8b3e2014-02-04 15:45:11 +00002386 gfs2_trans_add_unrevoke(sdp, block, *nblocks);
Steven Whitehouse6a8099e2011-11-22 12:18:51 +00002387
Steven Whitehousefd4b4e02013-02-26 16:15:20 +00002388 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002389
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002390 rbm.rgd->rd_free_clone -= *nblocks;
2391 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
Bob Peterson6e87ed02011-11-18 10:58:32 -05002392 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
Steven Whitehouse6050b9c2009-07-31 16:19:40 +01002393 *bn = block;
2394 return 0;
2395
2396rgrp_error:
Steven Whitehouse4a993fb2012-07-31 15:21:20 +01002397 gfs2_rgrp_error(rbm.rgd);
Steven Whitehouse6050b9c2009-07-31 16:19:40 +01002398 return -EIO;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002399}
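/*
 * Editorial sketch, not part of the original file: a minimal caller of the
 * allocator whose tail is shown above.  The name and signature of
 * gfs2_alloc_blocks() are assumed from rgrp.h, the wrapper below is
 * hypothetical, and the quota/transaction setup a real caller needs is
 * omitted.
 */
static int example_alloc_data_extent(struct gfs2_inode *ip, u64 *bnp,
				     unsigned int *lenp)
{
	unsigned int requested = *lenp;	/* in: maximum extent wanted */
	int error;

	/* dinode == false: plain data blocks, no inode generation needed */
	error = gfs2_alloc_blocks(ip, bnp, &requested, false, NULL);
	if (error)
		return error;

	*lenp = requested;		/* out: length actually allocated */
	return 0;
}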
2400
2401/**
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002402 * __gfs2_free_blocks - free a contiguous run of block(s)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002403 * @ip: the inode these blocks are being freed from
2404 * @bstart: first block of a run of contiguous blocks
2405 * @blen: the length of the block run
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002406 * @meta: 1 if the blocks represent metadata
David Teiglandb3b94fa2006-01-16 16:50:04 +00002407 *
2408 */
2409
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002410void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002411{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002412 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002413 struct gfs2_rgrpd *rgd;
2414
2415 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2416 if (!rgd)
2417 return;
Bob Peterson41db1ab2012-05-09 12:11:35 -04002418 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
Steven Whitehousecfc8b542008-11-04 10:25:13 +00002419 rgd->rd_free += blen;
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002420 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002421 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002422 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002423 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002424
Steven Whitehouse6d3117b2011-05-21 14:05:58 +01002425 /* Directories keep their data in the metadata address space */
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002426 if (meta || ip->i_depth)
Steven Whitehouse6d3117b2011-05-21 14:05:58 +01002427 gfs2_meta_wipe(ip, bstart, blen);
Bob Peterson4c16c362011-02-23 16:11:33 -05002428}
David Teiglandb3b94fa2006-01-16 16:50:04 +00002429
Bob Peterson4c16c362011-02-23 16:11:33 -05002430/**
Bob Peterson4c16c362011-02-23 16:11:33 -05002431 * gfs2_free_meta - free a contiguous run of metadata block(s)
2432 * @ip: the inode these blocks are being freed from
2433 * @bstart: first block of a run of contiguous blocks
2434 * @blen: the length of the block run
2435 *
2436 */
2437
Steven Whitehousecd915492006-09-04 12:49:07 -04002438void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002439{
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002440 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002441
Eric Sandeen46fcb2e2011-06-23 10:39:34 -05002442 __gfs2_free_blocks(ip, bstart, blen, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002443 gfs2_statfs_change(sdp, 0, +blen, 0);
Steven Whitehouse2933f922006-11-01 13:23:29 -05002444 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002445}
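/*
 * Editorial sketch, not part of the original file: freeing a run of ordinary
 * data blocks with the low-level helper above.  As the code shows,
 * __gfs2_free_blocks() only fixes up the bitmap and rgrp state, so the
 * statfs and quota accounting that gfs2_free_meta() performs for metadata is
 * mirrored here by the (hypothetical) caller.
 */
static void example_free_data_run(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	__gfs2_free_blocks(ip, bstart, blen, 0);	/* meta == 0: data */
	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}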
2446
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002447void gfs2_unlink_di(struct inode *inode)
2448{
2449 struct gfs2_inode *ip = GFS2_I(inode);
2450 struct gfs2_sbd *sdp = GFS2_SB(inode);
2451 struct gfs2_rgrpd *rgd;
Steven Whitehousedbb7cae2007-05-15 15:37:50 +01002452 u64 blkno = ip->i_no_addr;
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002453
2454 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2455 if (!rgd)
2456 return;
Bob Peterson41db1ab2012-05-09 12:11:35 -04002457 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002458 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002459 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002460 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2461 update_rgrp_lvb_unlinked(rgd, 1);
Steven Whitehousefeaa7bb2006-06-14 15:32:57 -04002462}
2463
Steven Whitehousecd915492006-09-04 12:49:07 -04002464static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002465{
2466 struct gfs2_sbd *sdp = rgd->rd_sbd;
2467 struct gfs2_rgrpd *tmp_rgd;
2468
2469 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2470 if (!tmp_rgd)
2471 return;
2472 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2473
Steven Whitehouse73f74942008-11-04 10:32:57 +00002474 if (!rgd->rd_dinodes)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002475 gfs2_consist_rgrpd(rgd);
Steven Whitehouse73f74942008-11-04 10:32:57 +00002476 rgd->rd_dinodes--;
Steven Whitehousecfc8b542008-11-04 10:25:13 +00002477 rgd->rd_free++;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002478
Steven Whitehouse350a9b02012-12-14 12:36:02 +00002479 gfs2_trans_add_meta(rgd->rd_gl, rgd->rd_bits[0].bi_bh);
Bob Peterson42d52e32008-01-28 18:38:07 -06002480 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
Benjamin Marzinski90306c42012-05-29 23:01:09 -05002481 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2482 update_rgrp_lvb_unlinked(rgd, -1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002483
2484 gfs2_statfs_change(sdp, 0, +1, -1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002485}
2486
David Teiglandb3b94fa2006-01-16 16:50:04 +00002487
2488void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2489{
Steven Whitehousedbb7cae2007-05-15 15:37:50 +01002490 gfs2_free_uninit_di(rgd, ip->i_no_addr);
Bob Peterson41db1ab2012-05-09 12:11:35 -04002491 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
Steven Whitehouse2933f922006-11-01 13:23:29 -05002492 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
Steven Whitehousedbb7cae2007-05-15 15:37:50 +01002493 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002494}
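/*
 * Editorial sketch, not part of the original file: the two-step teardown of
 * a dinode as seen from this file.  Dropping the last link only downgrades
 * the block to GFS2_BLKST_UNLINKED; the final deallocation later moves it to
 * free and adjusts the dinode/quota counts.  The wrapper is hypothetical and
 * assumes the caller already holds the relevant rgrp glock and transaction.
 */
static void example_dinode_teardown(struct inode *inode, struct gfs2_rgrpd *rgd)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	/* on last unlink: bitmap state becomes "unlinked, still in use" */
	gfs2_unlink_di(inode);

	/* ...later, when the inode is finally evicted... */

	/* final deallocation: bitmap to free, rd_dinodes--, statfs, quota, wipe */
	gfs2_free_di(rgd, ip);
}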
2495
2496/**
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002497 * gfs2_check_blk_type - Check the type of a block
2498 * @sdp: The superblock
2499 * @no_addr: The block number to check
2500 * @type: The block type we are looking for
2501 *
2502 * Returns: 0 if the block type matches the expected type
2503 * -ESTALE if it doesn't match
2504 * or a negative errno if something went wrong while checking
2505 */
2506
2507int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2508{
2509 struct gfs2_rgrpd *rgd;
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002510 struct gfs2_holder rgd_gh;
Bob Peterson58884c42012-03-05 10:19:35 -05002511 int error = -EINVAL;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002512
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002513 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002514 if (!rgd)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002515 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002516
2517 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2518 if (error)
Steven Whitehouse8339ee52011-08-31 16:38:29 +01002519 goto fail;
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002520
2521 if (gfs2_get_block_type(rgd, no_addr) != type)
2522 error = -ESTALE;
2523
2524 gfs2_glock_dq_uninit(&rgd_gh);
Steven Whitehouseacf7e242009-09-08 18:00:30 +01002525fail:
2526 return error;
2527}
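/*
 * Editorial sketch, not part of the original file: using the check above to
 * verify that a block number really is an unlinked dinode before acting on
 * it, in the style of the lookup-by-block-number paths.  The wrapper name is
 * hypothetical.
 */
static int example_verify_unlinked_dinode(struct gfs2_sbd *sdp, u64 no_addr)
{
	int error = gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_UNLINKED);

	if (error == -ESTALE)
		fs_warn(sdp, "block %llu is not an unlinked dinode\n",
			(unsigned long long)no_addr);
	return error;
}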
2528
2529/**
David Teiglandb3b94fa2006-01-16 16:50:04 +00002530 * gfs2_rlist_add - add an RG to a list of RGs
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002531 * @ip: the inode
David Teiglandb3b94fa2006-01-16 16:50:04 +00002532 * @rlist: the list of resource groups
2533 * @block: the block
2534 *
2535 * Figure out what RG a block belongs to and add that RG to the list
2536 *
2537 * FIXME: Don't use NOFAIL
2538 *
2539 */
2540
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002541void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
Steven Whitehousecd915492006-09-04 12:49:07 -04002542 u64 block)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002543{
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002544 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002545 struct gfs2_rgrpd *rgd;
2546 struct gfs2_rgrpd **tmp;
2547 unsigned int new_space;
2548 unsigned int x;
2549
2550 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2551 return;
2552
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002553 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2554 rgd = ip->i_rgd;
2555 else
Steven Whitehouse66fc0612012-02-08 12:58:32 +00002556 rgd = gfs2_blk2rgrpd(sdp, block, 1);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002557 if (!rgd) {
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002558 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002559 return;
2560 }
Steven Whitehouse70b0c362011-09-02 16:08:09 +01002561 ip->i_rgd = rgd;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002562
2563 for (x = 0; x < rlist->rl_rgrps; x++)
2564 if (rlist->rl_rgd[x] == rgd)
2565 return;
2566
2567 if (rlist->rl_rgrps == rlist->rl_space) {
2568 new_space = rlist->rl_space + 10;
2569
2570 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002571 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002572
2573 if (rlist->rl_rgd) {
2574 memcpy(tmp, rlist->rl_rgd,
2575 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2576 kfree(rlist->rl_rgd);
2577 }
2578
2579 rlist->rl_space = new_space;
2580 rlist->rl_rgd = tmp;
2581 }
2582
2583 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2584}
2585
2586/**
2587 * gfs2_rlist_alloc - allocate and initialize an array of glock holders for
2588 * all the resource groups that have been added to the rlist
2589 * @rlist: the list of resource groups
2590 * @state: the lock state to acquire the RG lock in
David Teiglandb3b94fa2006-01-16 16:50:04 +00002591 *
2592 * FIXME: Don't use NOFAIL
2593 *
2594 */
2595
Bob Petersonfe6c9912008-01-28 11:13:02 -06002596void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
David Teiglandb3b94fa2006-01-16 16:50:04 +00002597{
2598 unsigned int x;
2599
2600 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
Steven Whitehousedd894be2006-07-27 14:29:00 -04002601 GFP_NOFS | __GFP_NOFAIL);
David Teiglandb3b94fa2006-01-16 16:50:04 +00002602 for (x = 0; x < rlist->rl_rgrps; x++)
2603 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
Bob Petersonfe6c9912008-01-28 11:13:02 -06002604 state, 0,
David Teiglandb3b94fa2006-01-16 16:50:04 +00002605 &rlist->rl_ghs[x]);
2606}
2607
2608/**
2609 * gfs2_rlist_free - free a resource group list
Fabian Frederick27ff6a02014-07-02 22:05:27 +02002610 * @rlist: the list of resource groups
David Teiglandb3b94fa2006-01-16 16:50:04 +00002611 *
2612 */
2613
2614void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2615{
2616 unsigned int x;
2617
2618 kfree(rlist->rl_rgd);
2619
2620 if (rlist->rl_ghs) {
2621 for (x = 0; x < rlist->rl_rgrps; x++)
2622 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2623 kfree(rlist->rl_ghs);
Bob Peterson8e2e0042012-07-19 08:12:40 -04002624 rlist->rl_ghs = NULL;
David Teiglandb3b94fa2006-01-16 16:50:04 +00002625 }
2626}
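/*
 * Editorial sketch, not part of the original file: the usual lifetime of a
 * gfs2_rgrp_list as used by the deallocation paths - collect the rgrps, take
 * all of their glocks, do the work, then drop and free.  The helpers
 * gfs2_glock_nq_m()/gfs2_glock_dq_m() are assumed from glock.h, the wrapper
 * itself is hypothetical, and transaction/quota setup is omitted.
 */
static int example_free_block_run(struct gfs2_inode *ip, const u64 *blocks,
				  unsigned int nr)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;
	int error;

	memset(&rlist, 0, sizeof(rlist));

	/* figure out which rgrps the blocks live in */
	for (x = 0; x < nr; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);

	/* one holder per rgrp, then lock them all */
	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out;

	for (x = 0; x < nr; x++)
		gfs2_free_meta(ip, blocks[x], 1);

	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out:
	gfs2_rlist_free(&rlist);
	return error;
}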
2627