/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide to use
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with an inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding the different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page,
 * which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we actually need. The
 * extra blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of
 * heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value of
 * s_mb_group_prealloc is dependent on the cluster size; for non-bigalloc
 * file systems, it is 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses buddy scan only if the request len is a power
 * of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * the stripe size (sbi->s_stripe), we search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */
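
/*
 * Illustrative sketch of the stream/group decision described above (not
 * the literal code, which lives later in this file; the numbers assume
 * the default s_mb_stream_request of 16 blocks):
 *
 *	size = max(size_after_allocation, current_size);
 *	if (size < sbi->s_mb_stream_request)	// e.g. a 12-block file
 *		use the per-CPU locality group preallocation;
 *	else					// e.g. a 64-block file
 *		use per-inode preallocation;
 */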

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks marked used/free in the on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation, because we can't know the actual
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, it's OK. IOW, one
 *     can set a bit in the on-disk bitmap if the buddy has the same bit set
 *     and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data; given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again, PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few consequences:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init
 *
 */
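
/*
 * A worked example of the accounting rules above (the numbers are
 * illustrative, not from the code): if the on-disk bitmap has 100 bits set
 * and PAs cover another 20 blocks, init buddy marks 120 blocks used.
 * Creating a new 8-block PA gives buddy += 8, PA = 8. Using 3 blocks from
 * an inode PA sets 3 more on-disk bits and leaves PA = 5; the buddy is
 * untouched, since those blocks were already accounted as used when the
 * PA was created.
 */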

/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
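
/*
 * Worked example for mb_correct_addr_and_bit() (illustrative, 64-bit
 * case): for addr = base + 5 with bit = 2, addr is rounded down to base
 * and bit becomes 2 + 5 * 8 = 42, i.e. the same physical bit addressed
 * through an unsigned-long-aligned pointer.
 */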

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks with power-of-2 block counts.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
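
/*
 * Worked example for ext4_mb_mark_free_simple() (illustrative): a free
 * range with first = 5, len = 7 splits into the power-of-2 chunks [5],
 * [6-7] and [8-11]; bb_counters[0], bb_counters[1] and bb_counters[2]
 * are each bumped once, and the order-1 and order-2 buddy bits covering
 * blocks 6 and 8 are cleared (order-0 chunks only bump the counter).
 */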

/*
 * Cache the order of the largest free extent we have available in this block
 * group.
 */
static void
mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
{
	int i;
	int bits;

	grp->bb_largest_free_order = -1; /* uninit */

	bits = sb->s_blocksize_bits + 1;
	for (i = bits; i >= 0; i--) {
		if (grp->bb_counters[i] > 0) {
			grp->bb_largest_free_order = i;
			break;
		}
	}
}

static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_grpblk_t i = 0;
	ext4_grpblk_t first;
	ext4_grpblk_t len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_grp_locked_error(sb, group, 0, 0,
				      "block bitmap and bg descriptor "
				      "inconsistent: %u vs %u free clusters",
				      free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
	}
	mb_set_largest_free_order(sb, grp);

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

static void mb_regenerate_buddy(struct ext4_buddy *e4b)
{
	int count;
	int order = 1;
	void *buddy;

	while ((buddy = mb_find_buddy(e4b, order++, &count))) {
		ext4_set_bits(buddy, 0, count);
	}
	e4b->bd_info->bb_fragments = 0;
	memset(e4b->bd_info->bb_counters, 0,
	       sizeof(*e4b->bd_info->bb_counters) *
	       (e4b->bd_sb->s_blocksize_bits + 2));

	ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
			       e4b->bd_bitmap, e4b->bd_group);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, stored in the inode as
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page, which
 * is blocks_per_page/2.
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
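
/*
 * Mapping example (illustrative, assuming 1k blocks and 4k pages, so
 * blocks_per_page = 4 and groups_per_page = 2): group 5's bitmap lives
 * in buddy-cache block 10 and its buddy in block 11, both on page
 * 10 / 4 = 2, at block offsets 2 and 3 within that page.
 */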

static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
{
	ext4_group_t ngroups;
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group, group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh = NULL;
	struct inode *inode;
	char *data;
	char *bitmap;
	struct ext4_group_info *grinfo;

	mb_debug(1, "init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	ngroups = ext4_get_groups_count(sb);
	blocksize = i_blocksize(inode);
	blocks_per_page = PAGE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, gfp);
		if (bh == NULL) {
			err = -ENOMEM;
			goto out;
		}
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		if (group >= ngroups)
			break;

		grinfo = ext4_get_group_info(sb, group);
		/*
		 * If the page is uptodate then we came here after online
		 * resize which added some new uninitialized group info
		 * structs, so we must skip all initialized uptodate buddies
		 * on the page, which may be currently in use by an
		 * allocating task.
		 */
		if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
			bh[i] = NULL;
			continue;
		}
		bh[i] = ext4_read_block_bitmap_nowait(sb, group);
		if (IS_ERR(bh[i])) {
			err = PTR_ERR(bh[i]);
			bh[i] = NULL;
			goto out;
		}
		mb_debug(1, "read bitmap for group %u\n", group);
	}

	/* wait for I/O completion */
	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
		int err2;

		if (!bh[i])
			continue;
		err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
		if (!err)
			err = err2;
	}

	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		group = (first_block + i) >> 1;
		if (group >= ngroups)
			break;

		if (!bh[group - first_group])
			/* skip initialized uptodate buddy */
			continue;

		if (!buffer_verified(bh[group - first_group]))
			/* Skip faulty bitmaps */
			continue;
		err = 0;

		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug(1, "put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_buddy_bitmap_load(sb, group);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(*grinfo->bb_counters) *
			       (sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_lock_group(sb, group);
			/* init the buddy */
			memset(data, 0xff, blocksize);
			ext4_mb_generate_buddy(sb, data, incore, group);
			ext4_unlock_group(sb, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			trace_ext4_mb_bitmap_load(sb, group);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_mb_generate_from_freelist(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

/*
 * Lock the buddy and bitmap pages. This makes sure no other parallel
 * init_group happens on the same buddy page while we hold the buddy page
 * lock. Return the locked buddy and bitmap pages in the e4b struct. If the
 * buddy and bitmap are on the same page, e4b->bd_buddy_page is NULL and the
 * return value is 0.
 */
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
{
	struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
	int block, pnum, poff;
	int blocks_per_page;
	struct page *page;

	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	if (blocks_per_page >= 2) {
		/* buddy and bitmap are on the same page */
		return 0;
	}

	block++;
	pnum = block / blocks_per_page;
	page = find_or_create_page(inode->i_mapping, pnum, gfp);
	if (!page)
		return -ENOMEM;
	BUG_ON(page->mapping != inode->i_mapping);
	e4b->bd_buddy_page = page;
	return 0;
}

static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page) {
		unlock_page(e4b->bd_bitmap_page);
		put_page(e4b->bd_bitmap_page);
	}
	if (e4b->bd_buddy_page) {
		unlock_page(e4b->bd_buddy_page);
		put_page(e4b->bd_buddy_page);
	}
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
{

	struct ext4_group_info *this_grp;
	struct ext4_buddy e4b;
	struct page *page;
	int ret = 0;

	might_sleep();
	mb_debug(1, "init group %u\n", group);
	this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page in the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
	if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
		/*
		 * somebody initialized the group
		 * return without doing anything
		 */
		goto err;
	}

	page = e4b.bd_bitmap_page;
	ret = ext4_mb_init_cache(page, NULL, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	if (e4b.bd_buddy_page == NULL) {
		/*
		 * If both the bitmap and buddy are in
		 * the same page we don't need to force
		 * init the buddy
		 */
		ret = 0;
		goto err;
	}
	/* init buddy cache */
	page = e4b.bd_buddy_page;
	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
	if (ret)
		goto err;
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
err:
	ext4_mb_put_buddy_page_lock(&e4b);
	return ret;
}

/*
 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
 * block group lock of all groups for this page; do not hold the BG lock when
 * calling this routine!
 */
static noinline_for_stack int
ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
		       struct ext4_buddy *e4b, gfp_t gfp)
{
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;
	struct ext4_group_info *grp;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;

	might_sleep();
	mb_debug(1, "load group %u\n", group);

	blocks_per_page = PAGE_SIZE / sb->s_blocksize;
	grp = ext4_get_group_info(sb, group);

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = grp;
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		/*
		 * we need full data about the group
		 * to make a good selection
		 */
		ret = ext4_mb_init_group(sb, group, gfp);
		if (ret)
			return ret;
	}

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			/*
			 * drop the page reference and try
			 * to get the page with lock. If we
			 * are not uptodate that implies
			 * somebody just created the page but
			 * is yet to initialize it. So
			 * wait for it to initialize.
			 */
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL, gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			put_page(page);
		page = find_or_create_page(inode->i_mapping, pnum, gfp);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
							 gfp);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	if (!PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}

	/* Pages marked accessed already */
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (page)
		put_page(page);
	if (e4b->bd_bitmap_page)
		put_page(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		put_page(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
			      struct ext4_buddy *e4b)
{
	return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
}
1254
Jing Zhange39e07f2010-05-14 00:00:00 -04001255static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
Alex Tomasc9de5602008-01-29 00:19:52 -05001256{
1257 if (e4b->bd_bitmap_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001258 put_page(e4b->bd_bitmap_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001259 if (e4b->bd_buddy_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001260 put_page(e4b->bd_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001261}
1262
1263
1264static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1265{
1266 int order = 1;
Nicolai Stangeb5cb3162016-05-05 17:38:03 -04001267 int bb_incr = 1 << (e4b->bd_blkbits - 1);
Alex Tomasc9de5602008-01-29 00:19:52 -05001268 void *bb;
1269
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001270 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
Alex Tomasc9de5602008-01-29 00:19:52 -05001271 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1272
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001273 bb = e4b->bd_buddy;
Alex Tomasc9de5602008-01-29 00:19:52 -05001274 while (order <= e4b->bd_blkbits + 1) {
1275 block = block >> 1;
1276 if (!mb_test_bit(block, bb)) {
1277 /* this block is part of buddy of order 'order' */
1278 return order;
1279 }
Nicolai Stangeb5cb3162016-05-05 17:38:03 -04001280 bb += bb_incr;
1281 bb_incr >>= 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001282 order++;
1283 }
1284 return 0;
1285}
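
/*
 * Editorial example (hypothetical numbers, assuming 4k blocks so
 * bd_blkbits == 12): the order-1 buddy map starts at offset 0 of the
 * buddy block and each higher order follows bb_incr = 2048, 1024, ...
 * bytes later.  For block 20 the loop tests bit 20 >> 1 = 10 in the
 * order-1 map, then bit 5 in the order-2 map, and so on; the first
 * clear bit means block 20 sits inside a free buddy of that order,
 * which is returned.  A return of 0 means no enclosing free buddy.
 */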
1286
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001287static void mb_clear_bits(void *bm, int cur, int len)
Alex Tomasc9de5602008-01-29 00:19:52 -05001288{
1289 __u32 *addr;
1290
1291 len = cur + len;
1292 while (cur < len) {
1293 if ((cur & 31) == 0 && (len - cur) >= 32) {
1294 /* fast path: clear whole word at once */
1295 addr = bm + (cur >> 3);
1296 *addr = 0;
1297 cur += 32;
1298 continue;
1299 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001300 mb_clear_bit(cur, bm);
Alex Tomasc9de5602008-01-29 00:19:52 -05001301 cur++;
1302 }
1303}
1304
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001305/* clear bits in the given range;
1306 * returns the first zero bit found, if any, -1 otherwise
1307 */
1308static int mb_test_and_clear_bits(void *bm, int cur, int len)
1309{
1310 __u32 *addr;
1311 int zero_bit = -1;
1312
1313 len = cur + len;
1314 while (cur < len) {
1315 if ((cur & 31) == 0 && (len - cur) >= 32) {
1316 /* fast path: clear whole word at once */
1317 addr = bm + (cur >> 3);
1318 if (*addr != (__u32)(-1) && zero_bit == -1)
1319 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1320 *addr = 0;
1321 cur += 32;
1322 continue;
1323 }
1324 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1325 zero_bit = cur;
1326 cur++;
1327 }
1328
1329 return zero_bit;
1330}
1331
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04001332void ext4_set_bits(void *bm, int cur, int len)
Alex Tomasc9de5602008-01-29 00:19:52 -05001333{
1334 __u32 *addr;
1335
1336 len = cur + len;
1337 while (cur < len) {
1338 if ((cur & 31) == 0 && (len - cur) >= 32) {
1339 /* fast path: set whole word at once */
1340 addr = bm + (cur >> 3);
1341 *addr = 0xffffffff;
1342 cur += 32;
1343 continue;
1344 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001345 mb_set_bit(cur, bm);
Alex Tomasc9de5602008-01-29 00:19:52 -05001346 cur++;
1347 }
1348}
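
/*
 * Illustrative trace of the word-at-a-time fast path shared by
 * mb_clear_bits(), mb_test_and_clear_bits() and ext4_set_bits()
 * (hypothetical numbers): ext4_set_bits(bm, 5, 66) sets bits 5..31
 * one by one, then at cur == 32 stores 0xffffffff into the word at
 * byte offset cur >> 3 == 4 and jumps to cur == 64, and finally sets
 * bits 64..70 individually.
 */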
1349
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001350/*
1351 * _________________________________________________________________ */
1352
1353static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
Alex Tomasc9de5602008-01-29 00:19:52 -05001354{
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001355 if (mb_test_bit(*bit + side, bitmap)) {
1356 mb_clear_bit(*bit, bitmap);
1357 (*bit) -= side;
1358 return 1;
1359 }
1360 else {
1361 (*bit) += side;
1362 mb_set_bit(*bit, bitmap);
1363 return -1;
1364 }
1365}
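
/*
 * Editorial note: 'side' is -1 for the left border and +1 for the
 * right one.  If the neighbour bit on that side is set (busy), the
 * border cannot merge: its own bit is cleared so it remains a free
 * buddy of this order, the border moves inward, and +1 is returned.
 * Otherwise the free neighbour is absorbed: the border moves outward
 * onto it, that bit is set (it merges upward), and -1 is returned.
 * The caller adds the return value to bb_counters[order].
 */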
1366
1367static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1368{
1369 int max;
1370 int order = 1;
1371 void *buddy = mb_find_buddy(e4b, order, &max);
1372
1373 while (buddy) {
1374 void *buddy2;
1375
1376		/* Bits in range [first; last] are known to be set since
1377		 * the corresponding blocks were allocated. Bits in range
1378		 * (first; last) will stay set because they form buddies on
1379		 * the upper layer. We just deal with the borders if they
1380		 * don't align with the upper layer and then go up.
1381		 * Releasing the entire group comes down to clearing a
1382		 * single bit of the highest-order buddy.
1383		 */
1384
1385 /* Example:
1386 * ---------------------------------
1387 * | 1 | 1 | 1 | 1 |
1388 * ---------------------------------
1389 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1390 * ---------------------------------
1391 * 0 1 2 3 4 5 6 7
1392 * \_____________________/
1393 *
1394 * Neither [1] nor [6] is aligned to above layer.
1395 * Left neighbour [0] is free, so mark it busy,
1396 * decrease bb_counters and extend range to
1397 * [0; 6]
1398		 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1399 * mark [6] free, increase bb_counters and shrink range to
1400 * [0; 5].
1401 * Then shift range to [0; 2], go up and do the same.
1402 */
1403
1404
1405 if (first & 1)
1406 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1407 if (!(last & 1))
1408 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1409 if (first > last)
1410 break;
1411 order++;
1412
1413 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1414 mb_clear_bits(buddy, first, last - first + 1);
1415 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1416 break;
1417 }
1418 first >>= 1;
1419 last >>= 1;
1420 buddy = buddy2;
1421 }
1422}
1423
1424static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1425 int first, int count)
1426{
1427 int left_is_free = 0;
1428 int right_is_free = 0;
1429 int block;
1430 int last = first + count - 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001431 struct super_block *sb = e4b->bd_sb;
1432
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04001433 if (WARN_ON(count == 0))
1434 return;
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001435 BUG_ON(last >= (sb->s_blocksize << 3));
Vincent Minetbc8e6742009-05-15 08:33:18 -04001436 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
Darrick J. Wong163a2032013-08-28 17:35:51 -04001437 /* Don't bother if the block group is corrupt. */
1438 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1439 return;
1440
Alex Tomasc9de5602008-01-29 00:19:52 -05001441 mb_check_buddy(e4b);
1442 mb_free_blocks_double(inode, e4b, first, count);
1443
1444 e4b->bd_info->bb_free += count;
1445 if (first < e4b->bd_info->bb_first_free)
1446 e4b->bd_info->bb_first_free = first;
1447
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001448 /* access memory sequentially: check left neighbour,
1449 * clear range and then check right neighbour
1450 */
Alex Tomasc9de5602008-01-29 00:19:52 -05001451 if (first != 0)
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001452 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1453 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1454 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1455 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1456
1457 if (unlikely(block != -1)) {
Namjae Jeone43bb4e2014-06-26 10:11:53 -04001458 struct ext4_sb_info *sbi = EXT4_SB(sb);
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001459 ext4_fsblk_t blocknr;
1460
1461 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1462 blocknr += EXT4_C2B(EXT4_SB(sb), block);
1463 ext4_grp_locked_error(sb, e4b->bd_group,
1464 inode ? inode->i_ino : 0,
1465 blocknr,
1466 "freeing already freed block "
Darrick J. Wong163a2032013-08-28 17:35:51 -04001467 "(bit %u); block bitmap corrupt.",
1468 block);
Namjae Jeone43bb4e2014-06-26 10:11:53 -04001469 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
1470 percpu_counter_sub(&sbi->s_freeclusters_counter,
1471 e4b->bd_info->bb_free);
Darrick J. Wong163a2032013-08-28 17:35:51 -04001472 /* Mark the block group as corrupt. */
1473 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1474 &e4b->bd_info->bb_state);
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001475 mb_regenerate_buddy(e4b);
1476 goto done;
1477 }
1478
1479 /* let's maintain fragments counter */
1480 if (left_is_free && right_is_free)
Alex Tomasc9de5602008-01-29 00:19:52 -05001481 e4b->bd_info->bb_fragments--;
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001482 else if (!left_is_free && !right_is_free)
Alex Tomasc9de5602008-01-29 00:19:52 -05001483 e4b->bd_info->bb_fragments++;
1484
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001485 /* buddy[0] == bd_bitmap is a special case, so handle
1486 * it right away and let mb_buddy_mark_free stay free of
1487	 * zero-order checks.
1488	 * Check if neighbours are to be coalesced,
1489 * adjust bitmap bb_counters and borders appropriately.
1490 */
1491 if (first & 1) {
1492 first += !left_is_free;
1493 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001494 }
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001495 if (!(last & 1)) {
1496 last -= !right_is_free;
1497 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1498 }
1499
1500 if (first <= last)
1501 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1502
1503done:
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001504 mb_set_largest_free_order(sb, e4b->bd_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05001505 mb_check_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001506}
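
/*
 * Editorial note on the parity handling above: an odd 'first' (or an
 * even 'last') has its buddy partner outside the freed range.  If
 * that neighbour is free, the pair merges and bb_counters[0] drops by
 * one; if it is busy, the border block stays behind as a lone free
 * order-0 chunk (bb_counters[0]++) and is trimmed off the range
 * handed to mb_buddy_mark_free().
 */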
1507
Robin Dong15c006a2012-08-17 10:02:17 -04001508static int mb_find_extent(struct ext4_buddy *e4b, int block,
Alex Tomasc9de5602008-01-29 00:19:52 -05001509 int needed, struct ext4_free_extent *ex)
1510{
1511 int next = block;
Robin Dong15c006a2012-08-17 10:02:17 -04001512 int max, order;
Alex Tomasc9de5602008-01-29 00:19:52 -05001513 void *buddy;
1514
Vincent Minetbc8e6742009-05-15 08:33:18 -04001515 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -05001516 BUG_ON(ex == NULL);
1517
Robin Dong15c006a2012-08-17 10:02:17 -04001518 buddy = mb_find_buddy(e4b, 0, &max);
Alex Tomasc9de5602008-01-29 00:19:52 -05001519 BUG_ON(buddy == NULL);
1520 BUG_ON(block >= max);
1521 if (mb_test_bit(block, buddy)) {
1522 ex->fe_len = 0;
1523 ex->fe_start = 0;
1524 ex->fe_group = 0;
1525 return 0;
1526 }
1527
Robin Dong15c006a2012-08-17 10:02:17 -04001528 /* find actual order */
1529 order = mb_find_order_for_block(e4b, block);
1530 block = block >> order;
Alex Tomasc9de5602008-01-29 00:19:52 -05001531
1532 ex->fe_len = 1 << order;
1533 ex->fe_start = block << order;
1534 ex->fe_group = e4b->bd_group;
1535
1536 /* calc difference from given start */
1537 next = next - ex->fe_start;
1538 ex->fe_len -= next;
1539 ex->fe_start += next;
1540
1541 while (needed > ex->fe_len &&
Alan Coxd8ec0c32012-11-08 12:19:58 -05001542 mb_find_buddy(e4b, order, &max)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001543
1544 if (block + 1 >= max)
1545 break;
1546
1547 next = (block + 1) * (1 << order);
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001548 if (mb_test_bit(next, e4b->bd_bitmap))
Alex Tomasc9de5602008-01-29 00:19:52 -05001549 break;
1550
Robin Dongb051d8d2011-10-26 05:30:30 -04001551 order = mb_find_order_for_block(e4b, next);
Alex Tomasc9de5602008-01-29 00:19:52 -05001552
Alex Tomasc9de5602008-01-29 00:19:52 -05001553 block = next >> order;
1554 ex->fe_len += 1 << order;
1555 }
1556
Theodore Ts'o43c73222017-01-22 19:35:52 -05001557 if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
1558 /* Should never happen! (but apparently sometimes does?!?) */
1559 WARN_ON(1);
1560 ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
1561 "block=%d, order=%d needed=%d ex=%u/%d/%d@%u",
1562 block, order, needed, ex->fe_group, ex->fe_start,
1563 ex->fe_len, ex->fe_logical);
1564 ex->fe_len = 0;
1565 ex->fe_start = 0;
1566 ex->fe_group = 0;
1567 }
Alex Tomasc9de5602008-01-29 00:19:52 -05001568 return ex->fe_len;
1569}
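
/*
 * Worked example for mb_find_extent() (hypothetical numbers): suppose
 * block 5 is free inside a free order-2 buddy covering blocks 4..7
 * and needed == 8.  The first step yields fe_start = 4, fe_len = 4,
 * which the "calc difference" step trims to fe_start = 5, fe_len = 3
 * so the extent begins at the requested block.  The loop then probes
 * block 8 and, while the following chunks are free, keeps appending
 * 1 << order blocks until 'needed' is covered or a used block stops
 * the extent.
 */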
1570
1571static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1572{
1573 int ord;
1574 int mlen = 0;
1575 int max = 0;
1576 int cur;
1577 int start = ex->fe_start;
1578 int len = ex->fe_len;
1579 unsigned ret = 0;
1580 int len0 = len;
1581 void *buddy;
1582
1583 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1584 BUG_ON(e4b->bd_group != ex->fe_group);
Vincent Minetbc8e6742009-05-15 08:33:18 -04001585 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -05001586 mb_check_buddy(e4b);
1587 mb_mark_used_double(e4b, start, len);
1588
1589 e4b->bd_info->bb_free -= len;
1590 if (e4b->bd_info->bb_first_free == start)
1591 e4b->bd_info->bb_first_free += len;
1592
1593 /* let's maintain fragments counter */
1594 if (start != 0)
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001595 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001596 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001597 max = !mb_test_bit(start + len, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001598 if (mlen && max)
1599 e4b->bd_info->bb_fragments++;
1600 else if (!mlen && !max)
1601 e4b->bd_info->bb_fragments--;
1602
1603 /* let's maintain buddy itself */
1604 while (len) {
1605 ord = mb_find_order_for_block(e4b, start);
1606
1607 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1608 /* the whole chunk may be allocated at once! */
1609 mlen = 1 << ord;
1610 buddy = mb_find_buddy(e4b, ord, &max);
1611 BUG_ON((start >> ord) >= max);
1612 mb_set_bit(start >> ord, buddy);
1613 e4b->bd_info->bb_counters[ord]--;
1614 start += mlen;
1615 len -= mlen;
1616 BUG_ON(len < 0);
1617 continue;
1618 }
1619
1620 /* store for history */
1621 if (ret == 0)
1622 ret = len | (ord << 16);
1623
1624 /* we have to split large buddy */
1625 BUG_ON(ord <= 0);
1626 buddy = mb_find_buddy(e4b, ord, &max);
1627 mb_set_bit(start >> ord, buddy);
1628 e4b->bd_info->bb_counters[ord]--;
1629
1630 ord--;
1631 cur = (start >> ord) & ~1U;
1632 buddy = mb_find_buddy(e4b, ord, &max);
1633 mb_clear_bit(cur, buddy);
1634 mb_clear_bit(cur + 1, buddy);
1635 e4b->bd_info->bb_counters[ord]++;
1636 e4b->bd_info->bb_counters[ord]++;
1637 }
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001638 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05001639
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001640 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001641 mb_check_buddy(e4b);
1642
1643 return ret;
1644}
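
/*
 * Splitting sketch for mb_mark_used() (hypothetical numbers): marking
 * blocks 8..9 used when 8..15 form one free order-3 buddy.  The
 * order-3 bit is set and bb_counters[3] drops; the two order-2 halves
 * (8..11 and 12..15) are cleared and bb_counters[2] += 2; the loop
 * then retries at order 2 and splits again into the order-1 pairs
 * 8..9 and 10..11, until the pair 8..9 matches the request exactly
 * and is taken whole.
 */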
1645
1646/*
1647 * Must be called under group lock!
1648 */
1649static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1650 struct ext4_buddy *e4b)
1651{
1652 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1653 int ret;
1654
1655 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1656 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1657
1658 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1659 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1660 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1661
1662 /* preallocation can change ac_b_ex, thus we store actually
1663 * allocated blocks for history */
1664 ac->ac_f_ex = ac->ac_b_ex;
1665
1666 ac->ac_status = AC_STATUS_FOUND;
1667 ac->ac_tail = ret & 0xffff;
1668 ac->ac_buddy = ret >> 16;
1669
Aneesh Kumar K.Vc3a326a2008-11-25 15:11:52 -05001670 /*
1671 * take the page reference. We want the page to be pinned
1672	 * so that we don't get an ext4_mb_init_cache() call for this
1673	 * group until we update the bitmap. That would mean we
1674	 * double-allocate blocks. The reference is dropped
1675 * in ext4_mb_release_context
1676 */
Alex Tomasc9de5602008-01-29 00:19:52 -05001677 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1678 get_page(ac->ac_bitmap_page);
1679 ac->ac_buddy_page = e4b->bd_buddy_page;
1680 get_page(ac->ac_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001681 /* store last allocated for subsequent stream allocation */
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04001682 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001683 spin_lock(&sbi->s_md_lock);
1684 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1685 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1686 spin_unlock(&sbi->s_md_lock);
1687 }
1688}
1689
1690/*
1691 * regular allocator, for general purposes allocation
1692 */
1693
1694static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1695 struct ext4_buddy *e4b,
1696 int finish_group)
1697{
1698 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1699 struct ext4_free_extent *bex = &ac->ac_b_ex;
1700 struct ext4_free_extent *gex = &ac->ac_g_ex;
1701 struct ext4_free_extent ex;
1702 int max;
1703
Aneesh Kumar K.V032115f2009-01-05 21:34:30 -05001704 if (ac->ac_status == AC_STATUS_FOUND)
1705 return;
Alex Tomasc9de5602008-01-29 00:19:52 -05001706 /*
1707 * We don't want to scan for a whole year
1708 */
1709 if (ac->ac_found > sbi->s_mb_max_to_scan &&
1710 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1711 ac->ac_status = AC_STATUS_BREAK;
1712 return;
1713 }
1714
1715 /*
1716 * Haven't found good chunk so far, let's continue
1717 */
1718 if (bex->fe_len < gex->fe_len)
1719 return;
1720
1721 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1722 && bex->fe_group == e4b->bd_group) {
1723 /* recheck chunk's availability - we don't know
1724 * when it was found (within this lock-unlock
1725 * period or not) */
Robin Dong15c006a2012-08-17 10:02:17 -04001726 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001727 if (max >= gex->fe_len) {
1728 ext4_mb_use_best_found(ac, e4b);
1729 return;
1730 }
1731 }
1732}
1733
1734/*
1735 * The routine checks whether the found extent is good enough. If it is,
1736 * then the extent gets marked used and a flag is set in the context
1737 * to stop scanning. Otherwise, the extent is compared with the
1738 * previously found extent and, if the new one is better, it's stored
1739 * in the context. Later, the best found extent will be used, if
1740 * mballoc can't find a good enough extent.
1741 *
1742 * FIXME: the real allocation policy is yet to be designed!
1743 */
1744static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1745 struct ext4_free_extent *ex,
1746 struct ext4_buddy *e4b)
1747{
1748 struct ext4_free_extent *bex = &ac->ac_b_ex;
1749 struct ext4_free_extent *gex = &ac->ac_g_ex;
1750
1751 BUG_ON(ex->fe_len <= 0);
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001752 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1753 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05001754 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1755
1756 ac->ac_found++;
1757
1758 /*
1759 * The special case - take what you catch first
1760 */
1761 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1762 *bex = *ex;
1763 ext4_mb_use_best_found(ac, e4b);
1764 return;
1765 }
1766
1767 /*
1768	 * Let's check whether the chunk is good enough
1769 */
1770 if (ex->fe_len == gex->fe_len) {
1771 *bex = *ex;
1772 ext4_mb_use_best_found(ac, e4b);
1773 return;
1774 }
1775
1776 /*
1777 * If this is first found extent, just store it in the context
1778 */
1779 if (bex->fe_len == 0) {
1780 *bex = *ex;
1781 return;
1782 }
1783
1784 /*
1785 * If new found extent is better, store it in the context
1786 */
1787 if (bex->fe_len < gex->fe_len) {
1788		/* if the request isn't satisfied, any found extent
1789		 * larger than the previous best one is better */
1790 if (ex->fe_len > bex->fe_len)
1791 *bex = *ex;
1792 } else if (ex->fe_len > gex->fe_len) {
1793		/* if the request is satisfied, then we try to find
1794		 * an extent that still satisfies the request, but is
1795		 * smaller than the previous one */
1796 if (ex->fe_len < bex->fe_len)
1797 *bex = *ex;
1798 }
1799
1800 ext4_mb_check_limits(ac, e4b, 0);
1801}
1802
Eric Sandeen089ceec2009-07-05 22:17:31 -04001803static noinline_for_stack
1804int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001805 struct ext4_buddy *e4b)
1806{
1807 struct ext4_free_extent ex = ac->ac_b_ex;
1808 ext4_group_t group = ex.fe_group;
1809 int max;
1810 int err;
1811
1812 BUG_ON(ex.fe_len <= 0);
1813 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1814 if (err)
1815 return err;
1816
1817 ext4_lock_group(ac->ac_sb, group);
Robin Dong15c006a2012-08-17 10:02:17 -04001818 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001819
1820 if (max > 0) {
1821 ac->ac_b_ex = ex;
1822 ext4_mb_use_best_found(ac, e4b);
1823 }
1824
1825 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001826 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001827
1828 return 0;
1829}
1830
Eric Sandeen089ceec2009-07-05 22:17:31 -04001831static noinline_for_stack
1832int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001833 struct ext4_buddy *e4b)
1834{
1835 ext4_group_t group = ac->ac_g_ex.fe_group;
1836 int max;
1837 int err;
1838 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Yongqiang Yang838cd0c2012-09-23 23:10:51 -04001839 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05001840 struct ext4_free_extent ex;
1841
1842 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1843 return 0;
Yongqiang Yang838cd0c2012-09-23 23:10:51 -04001844 if (grp->bb_free == 0)
1845 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05001846
1847 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1848 if (err)
1849 return err;
1850
Darrick J. Wong163a2032013-08-28 17:35:51 -04001851 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1852 ext4_mb_unload_buddy(e4b);
1853 return 0;
1854 }
1855
Alex Tomasc9de5602008-01-29 00:19:52 -05001856 ext4_lock_group(ac->ac_sb, group);
Robin Dong15c006a2012-08-17 10:02:17 -04001857 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
Alex Tomasc9de5602008-01-29 00:19:52 -05001858 ac->ac_g_ex.fe_len, &ex);
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05001859 ex.fe_logical = 0xDEADFA11; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05001860
1861 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1862 ext4_fsblk_t start;
1863
Akinobu Mita5661bd62010-03-03 23:53:39 -05001864 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1865 ex.fe_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05001866 /* use do_div to get remainder (would be 64-bit modulo) */
1867 if (do_div(start, sbi->s_stripe) == 0) {
1868 ac->ac_found++;
1869 ac->ac_b_ex = ex;
1870 ext4_mb_use_best_found(ac, e4b);
1871 }
1872 } else if (max >= ac->ac_g_ex.fe_len) {
1873 BUG_ON(ex.fe_len <= 0);
1874 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1875 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1876 ac->ac_found++;
1877 ac->ac_b_ex = ex;
1878 ext4_mb_use_best_found(ac, e4b);
1879 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1880		/* Sometimes, the caller may want to merge even a small
1881		 * number of blocks into an existing extent */
1882 BUG_ON(ex.fe_len <= 0);
1883 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1884 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1885 ac->ac_found++;
1886 ac->ac_b_ex = ex;
1887 ext4_mb_use_best_found(ac, e4b);
1888 }
1889 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001890 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001891
1892 return 0;
1893}
1894
1895/*
1896 * The routine scans buddy structures (not the bitmap!) from the given
1897 * order to the max order and tries to find a big enough chunk to
1898 * satisfy the request
1898 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001899static noinline_for_stack
1900void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001901 struct ext4_buddy *e4b)
1902{
1903 struct super_block *sb = ac->ac_sb;
1904 struct ext4_group_info *grp = e4b->bd_info;
1905 void *buddy;
1906 int i;
1907 int k;
1908 int max;
1909
1910 BUG_ON(ac->ac_2order <= 0);
1911 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1912 if (grp->bb_counters[i] == 0)
1913 continue;
1914
1915 buddy = mb_find_buddy(e4b, i, &max);
1916 BUG_ON(buddy == NULL);
1917
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001918 k = mb_find_next_zero_bit(buddy, max, 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001919 BUG_ON(k >= max);
1920
1921 ac->ac_found++;
1922
1923 ac->ac_b_ex.fe_len = 1 << i;
1924 ac->ac_b_ex.fe_start = k << i;
1925 ac->ac_b_ex.fe_group = e4b->bd_group;
1926
1927 ext4_mb_use_best_found(ac, e4b);
1928
1929 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1930
1931 if (EXT4_SB(sb)->s_mb_stats)
1932 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1933
1934 break;
1935 }
1936}
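
/*
 * Example for the order-based scan above (hypothetical numbers): a
 * 16-block request gives ac_2order == 4, so the first order i >= 4
 * with grp->bb_counters[i] != 0 holds a free buddy, and the first
 * zero bit k of that order-i bitmap maps directly to blocks
 * k << i .. (k << i) + (1 << i) - 1; no order-0 bitmap walk is needed.
 */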
1937
1938/*
1939 * The routine scans the group and measures all found extents.
1940 * In order to optimize scanning, the caller must pass the number of
1941 * free blocks in the group, so the routine can know the upper limit.
1942 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001943static noinline_for_stack
1944void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001945 struct ext4_buddy *e4b)
1946{
1947 struct super_block *sb = ac->ac_sb;
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001948 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05001949 struct ext4_free_extent ex;
1950 int i;
1951 int free;
1952
1953 free = e4b->bd_info->bb_free;
1954 BUG_ON(free <= 0);
1955
1956 i = e4b->bd_info->bb_first_free;
1957
1958 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001959 i = mb_find_next_zero_bit(bitmap,
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001960 EXT4_CLUSTERS_PER_GROUP(sb), i);
1961 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001962 /*
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001963			 * If we have a corrupt bitmap, we won't find any
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001964			 * free blocks even though group info says we
1965			 * have free blocks
1966 */
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001967 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001968 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001969 "group info. But bitmap says 0",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001970 free);
Alex Tomasc9de5602008-01-29 00:19:52 -05001971 break;
1972 }
1973
Robin Dong15c006a2012-08-17 10:02:17 -04001974 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001975 BUG_ON(ex.fe_len <= 0);
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001976 if (free < ex.fe_len) {
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001977 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001978 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001979 "group info. But got %d blocks",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001980 free, ex.fe_len);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001981 /*
1982			 * The number of free blocks differs. This most
1983			 * likely indicates that the bitmap is corrupt.
1984			 * So exit without claiming the space.
1985 */
1986 break;
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001987 }
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05001988 ex.fe_logical = 0xDEADC0DE; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05001989 ext4_mb_measure_extent(ac, &ex, e4b);
1990
1991 i += ex.fe_len;
1992 free -= ex.fe_len;
1993 }
1994
1995 ext4_mb_check_limits(ac, e4b, 1);
1996}
1997
1998/*
1999 * This is a special case for storage like RAID5;
Eric Sandeen506bf2d2010-07-27 11:56:06 -04002000 * we try to find stripe-aligned chunks for stripe-size-multiple requests
Alex Tomasc9de5602008-01-29 00:19:52 -05002001 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04002002static noinline_for_stack
2003void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05002004 struct ext4_buddy *e4b)
2005{
2006 struct super_block *sb = ac->ac_sb;
2007 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05002008 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05002009 struct ext4_free_extent ex;
2010 ext4_fsblk_t first_group_block;
2011 ext4_fsblk_t a;
2012 ext4_grpblk_t i;
2013 int max;
2014
2015 BUG_ON(sbi->s_stripe == 0);
2016
2017 /* find first stripe-aligned block in group */
Akinobu Mita5661bd62010-03-03 23:53:39 -05002018 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2019
Alex Tomasc9de5602008-01-29 00:19:52 -05002020 a = first_group_block + sbi->s_stripe - 1;
2021 do_div(a, sbi->s_stripe);
2022 i = (a * sbi->s_stripe) - first_group_block;
2023
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04002024 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002025 if (!mb_test_bit(i, bitmap)) {
Robin Dong15c006a2012-08-17 10:02:17 -04002026 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05002027 if (max >= sbi->s_stripe) {
2028 ac->ac_found++;
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05002029 ex.fe_logical = 0xDEADF00D; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05002030 ac->ac_b_ex = ex;
2031 ext4_mb_use_best_found(ac, e4b);
2032 break;
2033 }
2034 }
2035 i += sbi->s_stripe;
2036 }
2037}
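
/*
 * Alignment arithmetic sketch (hypothetical numbers): if the group's
 * first block is 1000 and s_stripe is 16, then a = (1000 + 15) / 16
 * = 63, so i = 63 * 16 - 1000 = 8 is the offset of the first
 * stripe-aligned block in the group, and the loop tests every 16th
 * bit from there on.
 */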
2038
Lukas Czerner42ac1842015-06-08 11:40:40 -04002039/*
2040 * This is now called BEFORE we load the buddy bitmap.
2041 * Returns either 1 or 0, indicating whether the group is suitable
2042 * for the allocation or not. In addition it can also return a negative
2043 * error code when something goes wrong.
2044 */
Alex Tomasc9de5602008-01-29 00:19:52 -05002045static int ext4_mb_good_group(struct ext4_allocation_context *ac,
2046 ext4_group_t group, int cr)
2047{
2048 unsigned free, fragments;
Theodore Ts'oa4912122009-03-12 12:18:34 -04002049 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05002050 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2051
2052 BUG_ON(cr < 0 || cr >= 4);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002053
Theodore Ts'o01fc48e2012-08-17 09:46:17 -04002054 free = grp->bb_free;
2055 if (free == 0)
2056 return 0;
2057 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2058 return 0;
2059
Darrick J. Wong163a2032013-08-28 17:35:51 -04002060 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2061 return 0;
2062
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002063 /* We only do this if the grp has never been initialized */
2064 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04002065 int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002066 if (ret)
Lukas Czerner42ac1842015-06-08 11:40:40 -04002067 return ret;
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002068 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002069
Alex Tomasc9de5602008-01-29 00:19:52 -05002070 fragments = grp->bb_fragments;
Alex Tomasc9de5602008-01-29 00:19:52 -05002071 if (fragments == 0)
2072 return 0;
2073
2074 switch (cr) {
2075 case 0:
2076 BUG_ON(ac->ac_2order == 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05002077
Theodore Ts'oa4912122009-03-12 12:18:34 -04002078 /* Avoid using the first bg of a flexgroup for data files */
2079 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2080 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2081 ((group % flex_size) == 0))
2082 return 0;
2083
Theodore Ts'o40ae3482013-02-04 15:08:40 -05002084 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
2085 (free / fragments) >= ac->ac_g_ex.fe_len)
2086 return 1;
2087
2088 if (grp->bb_largest_free_order < ac->ac_2order)
2089 return 0;
2090
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002091 return 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002092 case 1:
2093 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2094 return 1;
2095 break;
2096 case 2:
2097 if (free >= ac->ac_g_ex.fe_len)
2098 return 1;
2099 break;
2100 case 3:
2101 return 1;
2102 default:
2103 BUG();
2104 }
2105
2106 return 0;
2107}
2108
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002109static noinline_for_stack int
2110ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05002111{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002112 ext4_group_t ngroups, group, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05002113 int cr;
Lukas Czerner42ac1842015-06-08 11:40:40 -04002114 int err = 0, first_err = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002115 struct ext4_sb_info *sbi;
2116 struct super_block *sb;
2117 struct ext4_buddy e4b;
Alex Tomasc9de5602008-01-29 00:19:52 -05002118
2119 sb = ac->ac_sb;
2120 sbi = EXT4_SB(sb);
Theodore Ts'o8df96752009-05-01 08:50:38 -04002121 ngroups = ext4_get_groups_count(sb);
Eric Sandeenfb0a3872009-09-16 14:45:10 -04002122 /* non-extent files are limited to low blocks/groups */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04002123 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04002124 ngroups = sbi->s_blockfile_groups;
2125
Alex Tomasc9de5602008-01-29 00:19:52 -05002126 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2127
2128 /* first, try the goal */
2129 err = ext4_mb_find_by_goal(ac, &e4b);
2130 if (err || ac->ac_status == AC_STATUS_FOUND)
2131 goto out;
2132
2133 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2134 goto out;
2135
2136 /*
2137	 * ac->ac_2order is set only if the fe_len is a power of 2;
2138	 * if ac_2order is set we also set the criteria to 0 so that we
2139	 * try exact allocation using the buddy cache.
2140 */
2141 i = fls(ac->ac_g_ex.fe_len);
2142 ac->ac_2order = 0;
2143 /*
2144 * We search using buddy data only if the order of the request
2145	 * is greater than or equal to sbi->s_mb_order2_reqs.
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04002146	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req.
Jan Karad9b22cf2017-02-10 00:50:56 -05002147	 * We also support searching for power-of-two requests only for
2148	 * requests up to the maximum buddy size we have constructed.
Alex Tomasc9de5602008-01-29 00:19:52 -05002149 */
Jan Karad9b22cf2017-02-10 00:50:56 -05002150 if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002151 /*
2152 * This should tell if fe_len is exactly power of 2
2153 */
2154 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2155 ac->ac_2order = i - 1;
2156 }
2157
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04002158 /* if stream allocation is enabled, use global goal */
2159 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002160		/* TBD: may be a hot spot */
2161 spin_lock(&sbi->s_md_lock);
2162 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2163 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2164 spin_unlock(&sbi->s_md_lock);
2165 }
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04002166
Alex Tomasc9de5602008-01-29 00:19:52 -05002167	/* Let's just scan groups to find more or less suitable blocks */
2168 cr = ac->ac_2order ? 0 : 1;
2169 /*
2170	 * cr == 0 try to get exact (power-of-two) allocation,
	 * cr == 1 needs the average fragment size to cover the request,
	 * cr == 2 only needs enough free blocks in the group,
2171	 * cr == 3 try to get anything
2172 */
2173repeat:
2174 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2175 ac->ac_criteria = cr;
Aneesh Kumar K.Ved8f9c72008-07-11 19:27:31 -04002176 /*
2177		 * start searching for the right group
2178		 * from the specified goal value
2179 */
2180 group = ac->ac_g_ex.fe_group;
2181
Theodore Ts'o8df96752009-05-01 08:50:38 -04002182 for (i = 0; i < ngroups; group++, i++) {
Lukas Czerner42ac1842015-06-08 11:40:40 -04002183 int ret = 0;
Theodore Ts'o2ed57242013-06-12 11:43:02 -04002184 cond_resched();
Lachlan McIlroye6155732013-05-05 23:10:00 -04002185 /*
2186 * Artificially restricted ngroups for non-extent
2187			 * files makes group > ngroups possible on the first loop.
2188 */
2189 if (group >= ngroups)
Alex Tomasc9de5602008-01-29 00:19:52 -05002190 group = 0;
2191
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002192 /* This now checks without needing the buddy page */
Lukas Czerner42ac1842015-06-08 11:40:40 -04002193 ret = ext4_mb_good_group(ac, group, cr);
2194 if (ret <= 0) {
2195 if (!first_err)
2196 first_err = ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002197 continue;
Lukas Czerner42ac1842015-06-08 11:40:40 -04002198 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002199
Alex Tomasc9de5602008-01-29 00:19:52 -05002200 err = ext4_mb_load_buddy(sb, group, &e4b);
2201 if (err)
2202 goto out;
2203
2204 ext4_lock_group(sb, group);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002205
2206 /*
2207 * We need to check again after locking the
2208 * block group
2209 */
Lukas Czerner42ac1842015-06-08 11:40:40 -04002210 ret = ext4_mb_good_group(ac, group, cr);
2211 if (ret <= 0) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002212 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002213 ext4_mb_unload_buddy(&e4b);
Lukas Czerner42ac1842015-06-08 11:40:40 -04002214 if (!first_err)
2215 first_err = ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002216 continue;
2217 }
2218
2219 ac->ac_groups_scanned++;
Jan Karad9b22cf2017-02-10 00:50:56 -05002220 if (cr == 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002221 ext4_mb_simple_scan_group(ac, &e4b);
Eric Sandeen506bf2d2010-07-27 11:56:06 -04002222 else if (cr == 1 && sbi->s_stripe &&
2223 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
Alex Tomasc9de5602008-01-29 00:19:52 -05002224 ext4_mb_scan_aligned(ac, &e4b);
2225 else
2226 ext4_mb_complex_scan_group(ac, &e4b);
2227
2228 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002229 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002230
2231 if (ac->ac_status != AC_STATUS_CONTINUE)
2232 break;
2233 }
2234 }
2235
2236 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2237 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2238 /*
2239 * We've been searching too long. Let's try to allocate
2240 * the best chunk we've found so far
2241 */
2242
2243 ext4_mb_try_best_found(ac, &e4b);
2244 if (ac->ac_status != AC_STATUS_FOUND) {
2245 /*
2246			 * Someone luckier has already allocated it.
2247			 * The only thing we can do is just take the first
2248			 * found block(s)
2249 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2250 */
2251 ac->ac_b_ex.fe_group = 0;
2252 ac->ac_b_ex.fe_start = 0;
2253 ac->ac_b_ex.fe_len = 0;
2254 ac->ac_status = AC_STATUS_CONTINUE;
2255 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2256 cr = 3;
2257 atomic_inc(&sbi->s_mb_lost_chunks);
2258 goto repeat;
2259 }
2260 }
2261out:
Lukas Czerner42ac1842015-06-08 11:40:40 -04002262 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2263 err = first_err;
Alex Tomasc9de5602008-01-29 00:19:52 -05002264 return err;
2265}
2266
Alex Tomasc9de5602008-01-29 00:19:52 -05002267static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2268{
2269 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002270 ext4_group_t group;
2271
Theodore Ts'o8df96752009-05-01 08:50:38 -04002272 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002273 return NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002274 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002275 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002276}
2277
2278static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2279{
2280 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002281 ext4_group_t group;
2282
2283 ++*pos;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002284 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002285 return NULL;
2286 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002287 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002288}
2289
2290static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2291{
2292 struct super_block *sb = seq->private;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002293 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
Alex Tomasc9de5602008-01-29 00:19:52 -05002294 int i;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002295 int err, buddy_loaded = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002296 struct ext4_buddy e4b;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002297 struct ext4_group_info *grinfo;
Alex Tomasc9de5602008-01-29 00:19:52 -05002298 struct sg {
2299 struct ext4_group_info info;
Chandan Rajendra30a9d7a2016-11-14 21:26:26 -05002300 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
Alex Tomasc9de5602008-01-29 00:19:52 -05002301 } sg;
2302
2303 group--;
2304 if (group == 0)
Rasmus Villemoes97b4af22015-06-15 00:32:58 -04002305 seq_puts(seq, "#group: free frags first ["
2306 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
Huaitong Han802cf1f2016-02-12 00:17:16 -05002307 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
Alex Tomasc9de5602008-01-29 00:19:52 -05002308
2309 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2310 sizeof(struct ext4_group_info);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002311 grinfo = ext4_get_group_info(sb, group);
2312 /* Load the group info in memory only if not already loaded. */
2313 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2314 err = ext4_mb_load_buddy(sb, group, &e4b);
2315 if (err) {
2316 seq_printf(seq, "#%-5u: I/O error\n", group);
2317 return 0;
2318 }
2319 buddy_loaded = 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002320 }
Aditya Kali1c8457c2012-06-30 19:10:57 -04002321
Alex Tomasc9de5602008-01-29 00:19:52 -05002322 memcpy(&sg, ext4_get_group_info(sb, group), i);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002323
2324 if (buddy_loaded)
2325 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002326
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002327 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
Alex Tomasc9de5602008-01-29 00:19:52 -05002328 sg.info.bb_fragments, sg.info.bb_first_free);
2329 for (i = 0; i <= 13; i++)
2330 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2331 sg.info.bb_counters[i] : 0);
2332 seq_printf(seq, " ]\n");
2333
2334 return 0;
2335}
2336
2337static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2338{
2339}
2340
Tobias Klauser7f1346a2009-09-05 09:28:54 -04002341static const struct seq_operations ext4_mb_seq_groups_ops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002342 .start = ext4_mb_seq_groups_start,
2343 .next = ext4_mb_seq_groups_next,
2344 .stop = ext4_mb_seq_groups_stop,
2345 .show = ext4_mb_seq_groups_show,
2346};
2347
2348static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2349{
Al Virod9dda782013-03-31 18:16:14 -04002350 struct super_block *sb = PDE_DATA(inode);
Alex Tomasc9de5602008-01-29 00:19:52 -05002351 int rc;
2352
2353 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2354 if (rc == 0) {
Joe Perchesa271fe82010-07-27 11:56:04 -04002355 struct seq_file *m = file->private_data;
Alex Tomasc9de5602008-01-29 00:19:52 -05002356 m->private = sb;
2357 }
2358 return rc;
2359
2360}
2361
Theodore Ts'oebd173b2015-09-23 12:46:17 -04002362const struct file_operations ext4_seq_mb_groups_fops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002363 .open = ext4_mb_seq_groups_open,
2364 .read = seq_read,
2365 .llseek = seq_lseek,
2366 .release = seq_release,
2367};
2368
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002369static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2370{
2371 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2372 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2373
2374 BUG_ON(!cachep);
2375 return cachep;
2376}
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002377
Theodore Ts'o28623c22012-09-05 01:31:50 -04002378/*
2379 * Allocate the top-level s_group_info array for the specified number
2380 * of groups
2381 */
2382int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2383{
2384 struct ext4_sb_info *sbi = EXT4_SB(sb);
2385 unsigned size;
2386 struct ext4_group_info ***new_groupinfo;
2387
2388 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2389 EXT4_DESC_PER_BLOCK_BITS(sb);
2390 if (size <= sbi->s_group_info_size)
2391 return 0;
2392
2393 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
Michal Hockoa7c3e902017-05-08 15:57:09 -07002394 new_groupinfo = kvzalloc(size, GFP_KERNEL);
Theodore Ts'o28623c22012-09-05 01:31:50 -04002395 if (!new_groupinfo) {
2396 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2397 return -ENOMEM;
2398 }
2399 if (sbi->s_group_info) {
2400 memcpy(new_groupinfo, sbi->s_group_info,
2401 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
Al Virob93b41d2014-11-20 12:19:11 -05002402 kvfree(sbi->s_group_info);
Theodore Ts'o28623c22012-09-05 01:31:50 -04002403 }
2404 sbi->s_group_info = new_groupinfo;
2405 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2406 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2407 sbi->s_group_info_size);
2408 return 0;
2409}
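
/*
 * Sizing example (editorial, assuming 4k blocks and 32-byte group
 * descriptors, i.e. EXT4_DESC_PER_BLOCK(sb) == 128): a filesystem
 * with 100000 groups needs ceil(100000 / 128) = 782 top-level
 * pointers, and the byte size of that array is rounded up to a power
 * of two by roundup_pow_of_two() before kvzalloc().
 */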
2410
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002411/* Create and initialize ext4_group_info data for the given group. */
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002412int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002413 struct ext4_group_desc *desc)
2414{
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002415 int i;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002416 int metalen = 0;
2417 struct ext4_sb_info *sbi = EXT4_SB(sb);
2418 struct ext4_group_info **meta_group_info;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002419 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002420
2421 /*
2422 * First check if this group is the first of a reserved block.
2423	 * If so, we have to allocate a new table of pointers
2424 * to ext4_group_info structures
2425 */
2426 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2427 metalen = sizeof(*meta_group_info) <<
2428 EXT4_DESC_PER_BLOCK_BITS(sb);
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002429 meta_group_info = kmalloc(metalen, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002430 if (meta_group_info == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002431 ext4_msg(sb, KERN_ERR, "can't allocate mem "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002432 "for a buddy group");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002433 goto exit_meta_group_info;
2434 }
2435 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2436 meta_group_info;
2437 }
2438
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002439 meta_group_info =
2440 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2441 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2442
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002443 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002444 if (meta_group_info[i] == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002445 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002446 goto exit_group_info;
2447 }
2448 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2449 &(meta_group_info[i]->bb_state));
2450
2451 /*
2452 * initialize bb_free to be able to skip
2453 * empty groups without initialization
2454 */
2455 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2456 meta_group_info[i]->bb_free =
Theodore Ts'ocff1dfd72011-09-09 19:12:51 -04002457 ext4_free_clusters_after_init(sb, group, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002458 } else {
2459 meta_group_info[i]->bb_free =
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002460 ext4_free_group_clusters(sb, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002461 }
2462
2463 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002464 init_rwsem(&meta_group_info[i]->alloc_sem);
Venkatesh Pallipadi64e290e2010-03-04 22:25:21 -05002465 meta_group_info[i]->bb_free_root = RB_ROOT;
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002466 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002467
2468#ifdef DOUBLE_CHECK
2469 {
2470 struct buffer_head *bh;
2471 meta_group_info[i]->bb_bitmap =
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002472 kmalloc(sb->s_blocksize, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002473 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2474 bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04002475 BUG_ON(IS_ERR_OR_NULL(bh));
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002476 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2477 sb->s_blocksize);
2478 put_bh(bh);
2479 }
2480#endif
2481
2482 return 0;
2483
2484exit_group_info:
2485 /* If a meta_group_info table has been allocated, release it now */
Tao Macaaf7a22011-07-11 18:42:42 -04002486 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002487 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
Tao Macaaf7a22011-07-11 18:42:42 -04002488 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2489 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002490exit_meta_group_info:
2491 return -ENOMEM;
2492} /* ext4_mb_add_groupinfo */
2493
Alex Tomasc9de5602008-01-29 00:19:52 -05002494static int ext4_mb_init_backend(struct super_block *sb)
2495{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002496 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002497 ext4_group_t i;
Alex Tomasc9de5602008-01-29 00:19:52 -05002498 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o28623c22012-09-05 01:31:50 -04002499 int err;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002500 struct ext4_group_desc *desc;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002501 struct kmem_cache *cachep;
Alex Tomasc9de5602008-01-29 00:19:52 -05002502
Theodore Ts'o28623c22012-09-05 01:31:50 -04002503 err = ext4_mb_alloc_groupinfo(sb, ngroups);
2504 if (err)
2505 return err;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002506
Alex Tomasc9de5602008-01-29 00:19:52 -05002507 sbi->s_buddy_cache = new_inode(sb);
2508 if (sbi->s_buddy_cache == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002509 ext4_msg(sb, KERN_ERR, "can't get new inode");
Alex Tomasc9de5602008-01-29 00:19:52 -05002510 goto err_freesgi;
2511 }
Yu Jian48e60612011-08-01 17:41:39 -04002512	/* To avoid potentially colliding with a valid on-disk inode number,
2513 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
2514 * not in the inode hash, so it should never be found by iget(), but
2515 * this will avoid confusion if it ever shows up during debugging. */
2516 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
Alex Tomasc9de5602008-01-29 00:19:52 -05002517 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002518 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002519 desc = ext4_get_group_desc(sb, i, NULL);
2520 if (desc == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002521 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002522 goto err_freebuddy;
2523 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002524 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2525 goto err_freebuddy;
Alex Tomasc9de5602008-01-29 00:19:52 -05002526 }
2527
2528 return 0;
2529
2530err_freebuddy:
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002531 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Roel Kluinf1fa3342008-04-29 22:01:15 -04002532 while (i-- > 0)
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002533 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
Theodore Ts'o28623c22012-09-05 01:31:50 -04002534 i = sbi->s_group_info_size;
Roel Kluinf1fa3342008-04-29 22:01:15 -04002535 while (i-- > 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002536 kfree(sbi->s_group_info[i]);
2537 iput(sbi->s_buddy_cache);
2538err_freesgi:
Al Virob93b41d2014-11-20 12:19:11 -05002539 kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002540 return -ENOMEM;
2541}
2542
Eric Sandeen2892c152011-02-12 08:12:18 -05002543static void ext4_groupinfo_destroy_slabs(void)
2544{
2545 int i;
2546
2547 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2548 if (ext4_groupinfo_caches[i])
2549 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2550 ext4_groupinfo_caches[i] = NULL;
2551 }
2552}
2553
2554static int ext4_groupinfo_create_slab(size_t size)
2555{
2556 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2557 int slab_size;
2558 int blocksize_bits = order_base_2(size);
2559 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2560 struct kmem_cache *cachep;
2561
2562 if (cache_index >= NR_GRPINFO_CACHES)
2563 return -EINVAL;
2564
2565 if (unlikely(cache_index < 0))
2566 cache_index = 0;
2567
2568 mutex_lock(&ext4_grpinfo_slab_create_mutex);
2569 if (ext4_groupinfo_caches[cache_index]) {
2570 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2571 return 0; /* Already created */
2572 }
2573
2574 slab_size = offsetof(struct ext4_group_info,
2575 bb_counters[blocksize_bits + 2]);
2576
2577 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2578 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2579 NULL);
2580
Tao Ma823ba012011-07-11 18:26:01 -04002581 ext4_groupinfo_caches[cache_index] = cachep;
2582
Eric Sandeen2892c152011-02-12 08:12:18 -05002583 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2584 if (!cachep) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002585 printk(KERN_EMERG
2586 "EXT4-fs: no memory for groupinfo slab cache\n");
Eric Sandeen2892c152011-02-12 08:12:18 -05002587 return -ENOMEM;
2588 }
2589
Eric Sandeen2892c152011-02-12 08:12:18 -05002590 return 0;
2591}
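
/*
 * Editorial note: the slab ends at bb_counters[blocksize_bits + 2]
 * because a group tracks free-buddy counts for orders 0 through
 * blocksize_bits + 1 (orders 0..13 for 4k blocks), matching the
 * "2^0 .. 2^13" columns that the mb_groups seq file prints above.
 */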
2592
Akira Fujita9d990122012-05-28 14:19:25 -04002593int ext4_mb_init(struct super_block *sb)
Alex Tomasc9de5602008-01-29 00:19:52 -05002594{
2595 struct ext4_sb_info *sbi = EXT4_SB(sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002596 unsigned i, j;
Nicolai Stange935244c2016-05-05 19:46:19 -04002597 unsigned offset, offset_incr;
Alex Tomasc9de5602008-01-29 00:19:52 -05002598 unsigned max;
Shen Feng74767c52008-07-11 19:27:31 -04002599 int ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002600
Eric Sandeen19278052009-08-25 22:36:25 -04002601 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
Alex Tomasc9de5602008-01-29 00:19:52 -05002602
2603 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2604 if (sbi->s_mb_offsets == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002605 ret = -ENOMEM;
2606 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002607 }
Yasunori Gotoff7ef322008-12-17 00:48:39 -05002608
Eric Sandeen19278052009-08-25 22:36:25 -04002609 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
Alex Tomasc9de5602008-01-29 00:19:52 -05002610 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2611 if (sbi->s_mb_maxs == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002612 ret = -ENOMEM;
2613 goto out;
2614 }
2615
Eric Sandeen2892c152011-02-12 08:12:18 -05002616 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2617 if (ret < 0)
2618 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002619
2620 /* order 0 is regular bitmap */
2621 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2622 sbi->s_mb_offsets[0] = 0;
2623
2624 i = 1;
2625 offset = 0;
Nicolai Stange935244c2016-05-05 19:46:19 -04002626 offset_incr = 1 << (sb->s_blocksize_bits - 1);
Alex Tomasc9de5602008-01-29 00:19:52 -05002627 max = sb->s_blocksize << 2;
2628 do {
2629 sbi->s_mb_offsets[i] = offset;
2630 sbi->s_mb_maxs[i] = max;
Nicolai Stange935244c2016-05-05 19:46:19 -04002631 offset += offset_incr;
2632 offset_incr = offset_incr >> 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002633 max = max >> 1;
2634 i++;
2635 } while (i <= sb->s_blocksize_bits + 1);
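
	/*
	 * Worked example of the table just built (editorial, assuming
	 * 4k blocks, s_blocksize_bits == 12): s_mb_maxs[0] = 32768 bits
	 * of plain bitmap; within the buddy block the order-1 map has
	 * 16384 bits at byte offset 0, order-2 has 8192 bits at offset
	 * 2048, order-3 has 4096 bits at offset 3072, halving each step.
	 */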
2636
Alex Tomasc9de5602008-01-29 00:19:52 -05002637 spin_lock_init(&sbi->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05002638 spin_lock_init(&sbi->s_bal_lock);
Theodore Ts'od08854f2016-06-26 18:24:01 -04002639 sbi->s_mb_free_pending = 0;
Daeho Jeonga0154342017-06-22 23:54:33 -04002640 INIT_LIST_HEAD(&sbi->s_freed_data_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05002641
2642 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2643 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2644 sbi->s_mb_stats = MB_DEFAULT_STATS;
2645 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2646 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
Theodore Ts'o27baebb2011-09-09 19:02:51 -04002647 /*
2648 * The default group preallocation is 512, which for 4k block
2649 * sizes translates to 2 megabytes. However for bigalloc file
2650	 * systems, this is probably too big (i.e., if the cluster size
2651	 * is 1 megabyte, then group preallocation size becomes half a
2652	 * gigabyte!). As a default, we will keep a two megabyte
2653	 * group prealloc size for cluster sizes up to 64k, and after
2654 * that, we will force a minimum group preallocation size of
2655 * 32 clusters. This translates to 8 megs when the cluster
2656 * size is 256k, and 32 megs when the cluster size is 1 meg,
2657 * which seems reasonable as a default.
2658 */
2659 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2660 sbi->s_cluster_bits, 32);
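	/*
	 * Worked example (editorial): with 4k blocks and 1M clusters,
	 * s_cluster_bits == 8, so 512 >> 8 == 2 clusters; the max()
	 * above then enforces the 32-cluster (32M) floor described in
	 * the comment.
	 */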
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04002661 /*
2662 * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
2663 * to the lowest multiple of s_stripe which is bigger than
2664 * the s_mb_group_prealloc as determined above. We want
2665 * the preallocation size to be an exact multiple of the
2666 * RAID stripe size so that preallocations don't fragment
2667 * the stripes.
2668 */
2669 if (sbi->s_stripe > 1) {
2670 sbi->s_mb_group_prealloc = roundup(
2671 sbi->s_mb_group_prealloc, sbi->s_stripe);
2672 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002673
Eric Sandeen730c2132008-09-13 15:23:29 -04002674 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002675 if (sbi->s_locality_groups == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002676 ret = -ENOMEM;
Andrey Tsyvarev029b10c2014-05-12 12:34:21 -04002677 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002678 }
Eric Sandeen730c2132008-09-13 15:23:29 -04002679 for_each_possible_cpu(i) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002680 struct ext4_locality_group *lg;
Eric Sandeen730c2132008-09-13 15:23:29 -04002681 lg = per_cpu_ptr(sbi->s_locality_groups, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002682 mutex_init(&lg->lg_mutex);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002683 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2684 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
Alex Tomasc9de5602008-01-29 00:19:52 -05002685 spin_lock_init(&lg->lg_prealloc_lock);
2686 }
2687
Yu Jian79a77c52011-08-01 17:41:46 -04002688 /* init file for buddy data */
2689 ret = ext4_mb_init_backend(sb);
Tao Ma7aa0bae2011-10-06 10:22:28 -04002690 if (ret != 0)
2691 goto out_free_locality_groups;
Yu Jian79a77c52011-08-01 17:41:46 -04002692
Tao Ma7aa0bae2011-10-06 10:22:28 -04002693 return 0;
2694
2695out_free_locality_groups:
2696 free_percpu(sbi->s_locality_groups);
2697 sbi->s_locality_groups = NULL;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002698out:
Tao Ma7aa0bae2011-10-06 10:22:28 -04002699 kfree(sbi->s_mb_offsets);
2700 sbi->s_mb_offsets = NULL;
2701 kfree(sbi->s_mb_maxs);
2702 sbi->s_mb_maxs = NULL;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002703 return ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002704}
2705
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002706/* needs to be called with the ext4 group lock held */
Alex Tomasc9de5602008-01-29 00:19:52 -05002707static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2708{
2709 struct ext4_prealloc_space *pa;
2710 struct list_head *cur, *tmp;
2711 int count = 0;
2712
2713 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2714 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2715 list_del(&pa->pa_group_list);
2716 count++;
Aneesh Kumar K.V688f05a2008-10-13 12:14:14 -04002717 kmem_cache_free(ext4_pspace_cachep, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05002718 }
2719 if (count)
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002720 mb_debug(1, "mballoc: %u PAs left\n", count);
Alex Tomasc9de5602008-01-29 00:19:52 -05002721
2722}
2723
2724int ext4_mb_release(struct super_block *sb)
2725{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002726 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002727 ext4_group_t i;
2728 int num_meta_group_infos;
2729 struct ext4_group_info *grinfo;
2730 struct ext4_sb_info *sbi = EXT4_SB(sb);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002731 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Alex Tomasc9de5602008-01-29 00:19:52 -05002732
Alex Tomasc9de5602008-01-29 00:19:52 -05002733 if (sbi->s_group_info) {
Theodore Ts'o8df96752009-05-01 08:50:38 -04002734 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002735 grinfo = ext4_get_group_info(sb, i);
2736#ifdef DOUBLE_CHECK
2737 kfree(grinfo->bb_bitmap);
2738#endif
2739 ext4_lock_group(sb, i);
2740 ext4_mb_cleanup_pa(grinfo);
2741 ext4_unlock_group(sb, i);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002742 kmem_cache_free(cachep, grinfo);
Alex Tomasc9de5602008-01-29 00:19:52 -05002743 }
Theodore Ts'o8df96752009-05-01 08:50:38 -04002744 num_meta_group_infos = (ngroups +
Alex Tomasc9de5602008-01-29 00:19:52 -05002745 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2746 EXT4_DESC_PER_BLOCK_BITS(sb);
2747 for (i = 0; i < num_meta_group_infos; i++)
2748 kfree(sbi->s_group_info[i]);
Al Virob93b41d2014-11-20 12:19:11 -05002749 kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002750 }
2751 kfree(sbi->s_mb_offsets);
2752 kfree(sbi->s_mb_maxs);
Markus Elfringbfcba2d2014-11-25 20:01:37 -05002753 iput(sbi->s_buddy_cache);
Alex Tomasc9de5602008-01-29 00:19:52 -05002754 if (sbi->s_mb_stats) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002755 ext4_msg(sb, KERN_INFO,
2756 "mballoc: %u blocks %u reqs (%u success)",
Alex Tomasc9de5602008-01-29 00:19:52 -05002757 atomic_read(&sbi->s_bal_allocated),
2758 atomic_read(&sbi->s_bal_reqs),
2759 atomic_read(&sbi->s_bal_success));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002760 ext4_msg(sb, KERN_INFO,
2761 "mballoc: %u extents scanned, %u goal hits, "
2762 "%u 2^N hits, %u breaks, %u lost",
Alex Tomasc9de5602008-01-29 00:19:52 -05002763 atomic_read(&sbi->s_bal_ex_scanned),
2764 atomic_read(&sbi->s_bal_goals),
2765 atomic_read(&sbi->s_bal_2orders),
2766 atomic_read(&sbi->s_bal_breaks),
2767 atomic_read(&sbi->s_mb_lost_chunks));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002768 ext4_msg(sb, KERN_INFO,
2769 "mballoc: %lu generated and it took %Lu",
Tao Maced156e2011-07-23 16:18:05 -04002770 sbi->s_mb_buddies_generated,
Alex Tomasc9de5602008-01-29 00:19:52 -05002771 sbi->s_mb_generation_time);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002772 ext4_msg(sb, KERN_INFO,
2773 "mballoc: %u preallocated, %u discarded",
Alex Tomasc9de5602008-01-29 00:19:52 -05002774 atomic_read(&sbi->s_mb_preallocated),
2775 atomic_read(&sbi->s_mb_discarded));
2776 }
2777
Eric Sandeen730c2132008-09-13 15:23:29 -04002778 free_percpu(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05002779
2780 return 0;
2781}
2782
Lukas Czerner77ca6cd2010-10-27 21:30:11 -04002783static inline int ext4_issue_discard(struct super_block *sb,
Daeho Jeonga0154342017-06-22 23:54:33 -04002784 ext4_group_t block_group, ext4_grpblk_t cluster, int count,
2785 struct bio **biop)
Jiaying Zhang5c521832010-07-27 11:56:05 -04002786{
Jiaying Zhang5c521832010-07-27 11:56:05 -04002787 ext4_fsblk_t discard_block;
2788
Theodore Ts'o84130192011-09-09 18:50:51 -04002789 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2790 ext4_group_first_block_no(sb, block_group));
2791 count = EXT4_C2B(EXT4_SB(sb), count);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002792 trace_ext4_discard_blocks(sb,
2793 (unsigned long long) discard_block, count);
Daeho Jeonga0154342017-06-22 23:54:33 -04002794 if (biop) {
2795 return __blkdev_issue_discard(sb->s_bdev,
2796 (sector_t)discard_block << (sb->s_blocksize_bits - 9),
2797 (sector_t)count << (sb->s_blocksize_bits - 9),
2798 GFP_NOFS, 0, biop);
2799 } else
2800 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002801}
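/*
 * Editor's note on the unit conversion above: the shift by
 * (s_blocksize_bits - 9) converts filesystem blocks into 512-byte
 * sectors.  For example, with 4k blocks each block spans 8 sectors, so
 * discarding 256 blocks starting at block 40960 issues a discard of
 * 2048 sectors starting at sector 327680.
 */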
2802
Daeho Jeonga0154342017-06-22 23:54:33 -04002803static void ext4_free_data_in_buddy(struct super_block *sb,
2804 struct ext4_free_data *entry)
Alex Tomasc9de5602008-01-29 00:19:52 -05002805{
Alex Tomasc9de5602008-01-29 00:19:52 -05002806 struct ext4_buddy e4b;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002807 struct ext4_group_info *db;
Theodore Ts'od9f34502011-04-30 13:47:24 -04002808 int err, count = 0, count2 = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002809
Bobi Jam18aadd42012-02-20 17:53:02 -05002810 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2811 entry->efd_count, entry->efd_group, entry);
Alex Tomasc9de5602008-01-29 00:19:52 -05002812
Bobi Jam18aadd42012-02-20 17:53:02 -05002813 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2814	/* we expect to find an existing buddy because it's pinned */
2815 BUG_ON(err != 0);
Theodore Ts'ob90f6872010-04-20 16:51:59 -04002816
Theodore Ts'od08854f2016-06-26 18:24:01 -04002817 spin_lock(&EXT4_SB(sb)->s_md_lock);
2818 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
2819 spin_unlock(&EXT4_SB(sb)->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05002820
Bobi Jam18aadd42012-02-20 17:53:02 -05002821 db = e4b.bd_info;
2822 /* there are blocks to put in buddy to make them really free */
2823 count += entry->efd_count;
2824 count2++;
2825 ext4_lock_group(sb, entry->efd_group);
2826 /* Take it out of per group rb tree */
2827 rb_erase(&entry->efd_node, &(db->bb_free_root));
2828 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002829
Bobi Jam18aadd42012-02-20 17:53:02 -05002830 /*
2831 * Clear the trimmed flag for the group so that the next
2832 * ext4_trim_fs can trim it.
2833 * If the volume is mounted with -o discard, online discard
2834 * is supported and the free blocks will be trimmed online.
2835 */
2836 if (!test_opt(sb, DISCARD))
2837 EXT4_MB_GRP_CLEAR_TRIMMED(db);
2838
2839 if (!db->bb_free_root.rb_node) {
2840		/* No more items in the per-group rb tree;
2841 * balance refcounts from ext4_mb_free_metadata()
Tao Ma3d56b8d2011-07-11 00:03:38 -04002842 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002843 put_page(e4b.bd_buddy_page);
2844 put_page(e4b.bd_bitmap_page);
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04002845 }
Bobi Jam18aadd42012-02-20 17:53:02 -05002846 ext4_unlock_group(sb, entry->efd_group);
2847 kmem_cache_free(ext4_free_data_cachep, entry);
2848 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002849
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002850 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
Alex Tomasc9de5602008-01-29 00:19:52 -05002851}
2852
Daeho Jeonga0154342017-06-22 23:54:33 -04002853/*
2854 * This function is called by the jbd2 layer once the commit has finished,
2855 * so we know we can free the blocks that were released with that commit.
2856 */
2857void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
2858{
2859 struct ext4_sb_info *sbi = EXT4_SB(sb);
2860 struct ext4_free_data *entry, *tmp;
2861 struct bio *discard_bio = NULL;
2862 struct list_head freed_data_list;
2863 struct list_head *cut_pos = NULL;
2864 int err;
2865
2866 INIT_LIST_HEAD(&freed_data_list);
2867
2868 spin_lock(&sbi->s_md_lock);
2869 list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
2870 if (entry->efd_tid != commit_tid)
2871 break;
2872 cut_pos = &entry->efd_list;
2873 }
2874 if (cut_pos)
2875 list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
2876 cut_pos);
2877 spin_unlock(&sbi->s_md_lock);
2878
2879 if (test_opt(sb, DISCARD)) {
2880 list_for_each_entry(entry, &freed_data_list, efd_list) {
2881 err = ext4_issue_discard(sb, entry->efd_group,
2882 entry->efd_start_cluster,
2883 entry->efd_count,
2884 &discard_bio);
2885 if (err && err != -EOPNOTSUPP) {
2886 ext4_msg(sb, KERN_WARNING, "discard request in"
2887 " group:%d block:%d count:%d failed"
2888 " with %d", entry->efd_group,
2889 entry->efd_start_cluster,
2890 entry->efd_count, err);
2891 } else if (err == -EOPNOTSUPP)
2892 break;
2893 }
2894
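		/*
		 * Editor's note: __blkdev_issue_discard() chains the bio for
		 * each extent onto discard_bio, so the single
		 * submit_bio_wait() below submits and waits for the whole
		 * chain at once.
		 */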
Daeho Jeonge4510572017-08-05 13:11:57 -04002895 if (discard_bio) {
Daeho Jeonga0154342017-06-22 23:54:33 -04002896 submit_bio_wait(discard_bio);
Daeho Jeonge4510572017-08-05 13:11:57 -04002897 bio_put(discard_bio);
2898 }
Daeho Jeonga0154342017-06-22 23:54:33 -04002899 }
2900
2901 list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
2902 ext4_free_data_in_buddy(sb, entry);
2903}
2904
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002905int __init ext4_init_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002906{
Theodore Ts'o16828082010-10-27 21:30:09 -04002907 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2908 SLAB_RECLAIM_ACCOUNT);
Alex Tomasc9de5602008-01-29 00:19:52 -05002909 if (ext4_pspace_cachep == NULL)
2910 return -ENOMEM;
2911
Theodore Ts'o16828082010-10-27 21:30:09 -04002912 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2913 SLAB_RECLAIM_ACCOUNT);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002914 if (ext4_ac_cachep == NULL) {
2915 kmem_cache_destroy(ext4_pspace_cachep);
2916 return -ENOMEM;
2917 }
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002918
Bobi Jam18aadd42012-02-20 17:53:02 -05002919 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2920 SLAB_RECLAIM_ACCOUNT);
2921 if (ext4_free_data_cachep == NULL) {
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002922 kmem_cache_destroy(ext4_pspace_cachep);
2923 kmem_cache_destroy(ext4_ac_cachep);
2924 return -ENOMEM;
2925 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002926 return 0;
2927}
2928
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002929void ext4_exit_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002930{
Theodore Ts'o60e66792010-05-17 07:00:00 -04002931 /*
Jesper Dangaard Brouer3e03f9c2009-07-05 22:29:27 -04002932 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2933 * before destroying the slab cache.
2934 */
2935 rcu_barrier();
Alex Tomasc9de5602008-01-29 00:19:52 -05002936 kmem_cache_destroy(ext4_pspace_cachep);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002937 kmem_cache_destroy(ext4_ac_cachep);
Bobi Jam18aadd42012-02-20 17:53:02 -05002938 kmem_cache_destroy(ext4_free_data_cachep);
Eric Sandeen2892c152011-02-12 08:12:18 -05002939 ext4_groupinfo_destroy_slabs();
Alex Tomasc9de5602008-01-29 00:19:52 -05002940}
2941
2942
2943/*
Uwe Kleine-König73b2c712010-07-30 21:02:47 +02002944 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
Alex Tomasc9de5602008-01-29 00:19:52 -05002945 * Returns 0 on success or an error code
2946 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002947static noinline_for_stack int
2948ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002949 handle_t *handle, unsigned int reserv_clstrs)
Alex Tomasc9de5602008-01-29 00:19:52 -05002950{
2951 struct buffer_head *bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002952 struct ext4_group_desc *gdp;
2953 struct buffer_head *gdp_bh;
2954 struct ext4_sb_info *sbi;
2955 struct super_block *sb;
2956 ext4_fsblk_t block;
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04002957 int err, len;
Alex Tomasc9de5602008-01-29 00:19:52 -05002958
2959 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2960 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2961
2962 sb = ac->ac_sb;
2963 sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002964
Theodore Ts'o574ca172008-07-11 19:27:31 -04002965 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04002966 if (IS_ERR(bitmap_bh)) {
2967 err = PTR_ERR(bitmap_bh);
2968 bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002969 goto out_err;
Darrick J. Wong9008a582015-10-17 21:33:24 -04002970 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002971
liang xie5d601252014-05-12 22:06:43 -04002972 BUFFER_TRACE(bitmap_bh, "getting write access");
Alex Tomasc9de5602008-01-29 00:19:52 -05002973 err = ext4_journal_get_write_access(handle, bitmap_bh);
2974 if (err)
2975 goto out_err;
2976
2977 err = -EIO;
2978 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2979 if (!gdp)
2980 goto out_err;
2981
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002982 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002983 ext4_free_group_clusters(sb, gdp));
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04002984
liang xie5d601252014-05-12 22:06:43 -04002985 BUFFER_TRACE(gdp_bh, "get_write_access");
Alex Tomasc9de5602008-01-29 00:19:52 -05002986 err = ext4_journal_get_write_access(handle, gdp_bh);
2987 if (err)
2988 goto out_err;
2989
Akinobu Mitabda00de2010-03-03 23:53:25 -05002990 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05002991
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002992 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Theodore Ts'o6fd058f2009-05-17 15:38:01 -04002993 if (!ext4_data_block_valid(sbi, block, len)) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05002994 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
Theodore Ts'o1084f252012-03-19 23:13:43 -04002995 "fs metadata", block, block+len);
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04002996		/* File system is mounted not to panic on error;
Vegard Nossum554a5cc2016-07-14 23:02:47 -04002997		 * fix the bitmap and return EFSCORRUPTED.
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04002998 * We leak some of the blocks here.
2999 */
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003000 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04003001 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3002 ac->ac_b_ex.fe_len);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003003 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Frank Mayhar03901312009-01-07 00:06:22 -05003004 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04003005 if (!err)
Vegard Nossum554a5cc2016-07-14 23:02:47 -04003006 err = -EFSCORRUPTED;
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04003007 goto out_err;
Alex Tomasc9de5602008-01-29 00:19:52 -05003008 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003009
3010 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003011#ifdef AGGRESSIVE_CHECK
3012 {
3013 int i;
3014 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
3015 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
3016 bitmap_bh->b_data));
3017 }
3018 }
3019#endif
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04003020 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
3021 ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003022 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
3023 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
Theodore Ts'o021b65b2011-09-09 19:08:51 -04003024 ext4_free_group_clusters_set(sb, gdp,
Theodore Ts'ocff1dfd72011-09-09 19:12:51 -04003025 ext4_free_clusters_after_init(sb,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04003026 ac->ac_b_ex.fe_group, gdp));
Alex Tomasc9de5602008-01-29 00:19:52 -05003027 }
Theodore Ts'o021b65b2011-09-09 19:08:51 -04003028 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
3029 ext4_free_group_clusters_set(sb, gdp, len);
Tao Ma79f1ba42012-10-22 00:34:32 -04003030 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04003031 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003032
3033 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Theodore Ts'o57042652011-09-09 18:56:51 -04003034 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
Mingming Caod2a17632008-07-14 17:52:37 -04003035 /*
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04003036	 * Now reduce the dirty block count, too; it should not go negative
Mingming Caod2a17632008-07-14 17:52:37 -04003037 */
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04003038 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
3039 /* release all the reserved blocks if non delalloc */
Theodore Ts'o57042652011-09-09 18:56:51 -04003040 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
3041 reserv_clstrs);
Alex Tomasc9de5602008-01-29 00:19:52 -05003042
Jose R. Santos772cb7c2008-07-11 19:27:31 -04003043 if (sbi->s_log_groups_per_flex) {
3044 ext4_group_t flex_group = ext4_flex_group(sbi,
3045 ac->ac_b_ex.fe_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04003046 atomic64_sub(ac->ac_b_ex.fe_len,
3047 &sbi->s_flex_groups[flex_group].free_clusters);
Jose R. Santos772cb7c2008-07-11 19:27:31 -04003048 }
3049
Frank Mayhar03901312009-01-07 00:06:22 -05003050 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003051 if (err)
3052 goto out_err;
Frank Mayhar03901312009-01-07 00:06:22 -05003053 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003054
3055out_err:
Aneesh Kumar K.V42a10ad2008-02-10 01:07:28 -05003056 brelse(bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003057 return err;
3058}
3059
3060/*
3061 * here we normalize the request for a locality group.
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04003062 * Group requests are normalized to s_mb_group_prealloc, which is rounded
3063 * up to a multiple of s_stripe if a stripe size was set via mount option.
3064 * s_mb_group_prealloc can be configured via
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04003065 * /sys/fs/ext4/<partition>/mb_group_prealloc
Alex Tomasc9de5602008-01-29 00:19:52 -05003066 *
3067 * XXX: should we try to preallocate more than the group has now?
3068 */
3069static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3070{
3071 struct super_block *sb = ac->ac_sb;
3072 struct ext4_locality_group *lg = ac->ac_lg;
3073
3074 BUG_ON(lg == NULL);
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04003075 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003076 mb_debug(1, "#%u: goal %u blocks for locality group\n",
Alex Tomasc9de5602008-01-29 00:19:52 -05003077 current->pid, ac->ac_g_ex.fe_len);
3078}
3079
3080/*
3081 * Normalization means making the request better in terms of
3082 * size and alignment
3083 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003084static noinline_for_stack void
3085ext4_mb_normalize_request(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05003086 struct ext4_allocation_request *ar)
3087{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003088 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003089 int bsbits, max;
3090 ext4_lblk_t end;
Curt Wohlgemuth1592d2c2012-02-20 17:53:03 -05003091 loff_t size, start_off;
3092 loff_t orig_size __maybe_unused;
Andi Kleen5a0790c2010-06-14 13:28:03 -04003093 ext4_lblk_t start;
Alex Tomasc9de5602008-01-29 00:19:52 -05003094 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003095 struct ext4_prealloc_space *pa;
Alex Tomasc9de5602008-01-29 00:19:52 -05003096
3097	/* only normalize data requests; metadata requests
3098 do not need preallocation */
3099 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3100 return;
3101
3102	/* sometimes the caller may want exact blocks */
3103 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3104 return;
3105
3106 /* caller may indicate that preallocation isn't
3107 * required (it's a tail, for example) */
3108 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3109 return;
3110
3111 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3112 ext4_mb_normalize_group_request(ac);
3113 return ;
3114 }
3115
3116 bsbits = ac->ac_sb->s_blocksize_bits;
3117
3118	/* first, let's learn the actual file size
3119	 * we'd have once the current request is allocated */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003120 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003121 size = size << bsbits;
3122 if (size < i_size_read(ac->ac_inode))
3123 size = i_size_read(ac->ac_inode);
Andi Kleen5a0790c2010-06-14 13:28:03 -04003124 orig_size = size;
Alex Tomasc9de5602008-01-29 00:19:52 -05003125
Valerie Clement19304792008-05-13 19:31:14 -04003126 /* max size of free chunks */
3127 max = 2 << bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003128
Valerie Clement19304792008-05-13 19:31:14 -04003129#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
3130 (req <= (size) || max <= (chunk_size))
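/*
 * Editor's note: NRL_CHECK_SIZE() is true when the request already fits
 * this bucket (req <= size), or when the buddy system cannot track chunks
 * larger than chunk_size anyway (max <= chunk_size), in which case there
 * is no point in rounding up to a larger bucket.  E.g. with 1k blocks,
 * max = 2 << 10 = 2048 blocks (2MB), so requests above 1MB all land in
 * the 2MB bucket below.
 */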
Alex Tomasc9de5602008-01-29 00:19:52 -05003131
3132 /* first, try to predict filesize */
3133 /* XXX: should this table be tunable? */
3134 start_off = 0;
3135 if (size <= 16 * 1024) {
3136 size = 16 * 1024;
3137 } else if (size <= 32 * 1024) {
3138 size = 32 * 1024;
3139 } else if (size <= 64 * 1024) {
3140 size = 64 * 1024;
3141 } else if (size <= 128 * 1024) {
3142 size = 128 * 1024;
3143 } else if (size <= 256 * 1024) {
3144 size = 256 * 1024;
3145 } else if (size <= 512 * 1024) {
3146 size = 512 * 1024;
3147 } else if (size <= 1024 * 1024) {
3148 size = 1024 * 1024;
Valerie Clement19304792008-05-13 19:31:14 -04003149 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003150 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
Valerie Clement19304792008-05-13 19:31:14 -04003151 (21 - bsbits)) << 21;
3152 size = 2 * 1024 * 1024;
3153 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003154 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3155 (22 - bsbits)) << 22;
3156 size = 4 * 1024 * 1024;
3157 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
Valerie Clement19304792008-05-13 19:31:14 -04003158 (8<<20)>>bsbits, max, 8 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003159 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3160 (23 - bsbits)) << 23;
3161 size = 8 * 1024 * 1024;
3162 } else {
Xiaoguang Wangb27b1532014-07-27 22:26:36 -04003163 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3164 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3165 ac->ac_o_ex.fe_len) << bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003166 }
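	/*
	 * Worked example (editor's note): a request that would make the
	 * file ~300k lands in the "size <= 512 * 1024" bucket above, so the
	 * goal is widened to 512k starting at logical offset 0; for the
	 * power-of-two buckets from 2MB up, start_off additionally rounds
	 * the logical start down to the bucket's alignment boundary.
	 */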
Andi Kleen5a0790c2010-06-14 13:28:03 -04003167 size = size >> bsbits;
3168 start = start_off >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003169
3170 /* don't cover already allocated blocks in selected range */
3171 if (ar->pleft && start <= ar->lleft) {
3172 size -= ar->lleft + 1 - start;
3173 start = ar->lleft + 1;
3174 }
3175 if (ar->pright && start + size - 1 >= ar->lright)
3176 size -= start + size - ar->lright;
3177
Jan Karacd648b82017-01-27 14:34:30 -05003178 /*
3179 * Trim allocation request for filesystems with artificially small
3180 * groups.
3181 */
3182 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3183 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3184
Alex Tomasc9de5602008-01-29 00:19:52 -05003185 end = start + size;
3186
3187 /* check we don't cross already preallocated blocks */
3188 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003189 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003190 ext4_lblk_t pa_end;
Alex Tomasc9de5602008-01-29 00:19:52 -05003191
Alex Tomasc9de5602008-01-29 00:19:52 -05003192 if (pa->pa_deleted)
3193 continue;
3194 spin_lock(&pa->pa_lock);
3195 if (pa->pa_deleted) {
3196 spin_unlock(&pa->pa_lock);
3197 continue;
3198 }
3199
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003200 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3201 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003202
3203 /* PA must not overlap original request */
3204 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3205 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3206
Eric Sandeen38877f42009-08-17 23:55:24 -04003207 /* skip PAs this normalized request doesn't overlap with */
3208 if (pa->pa_lstart >= end || pa_end <= start) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003209 spin_unlock(&pa->pa_lock);
3210 continue;
3211 }
3212 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3213
Eric Sandeen38877f42009-08-17 23:55:24 -04003214 /* adjust start or end to be adjacent to this pa */
Alex Tomasc9de5602008-01-29 00:19:52 -05003215 if (pa_end <= ac->ac_o_ex.fe_logical) {
3216 BUG_ON(pa_end < start);
3217 start = pa_end;
Eric Sandeen38877f42009-08-17 23:55:24 -04003218 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003219 BUG_ON(pa->pa_lstart > end);
3220 end = pa->pa_lstart;
3221 }
3222 spin_unlock(&pa->pa_lock);
3223 }
3224 rcu_read_unlock();
3225 size = end - start;
3226
3227 /* XXX: extra loop to check we really don't overlap preallocations */
3228 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003229 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003230 ext4_lblk_t pa_end;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003231
Alex Tomasc9de5602008-01-29 00:19:52 -05003232 spin_lock(&pa->pa_lock);
3233 if (pa->pa_deleted == 0) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003234 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3235 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003236 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3237 }
3238 spin_unlock(&pa->pa_lock);
3239 }
3240 rcu_read_unlock();
3241
3242 if (start + size <= ac->ac_o_ex.fe_logical &&
3243 start > ac->ac_o_ex.fe_logical) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003244 ext4_msg(ac->ac_sb, KERN_ERR,
3245 "start %lu, size %lu, fe_logical %lu",
3246 (unsigned long) start, (unsigned long) size,
3247 (unsigned long) ac->ac_o_ex.fe_logical);
Dmitry Monakhovdfe076c2014-10-01 22:26:17 -04003248 BUG();
Alex Tomasc9de5602008-01-29 00:19:52 -05003249 }
Maurizio Lombardib5b60772014-05-27 12:48:56 -04003250 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05003251
3252 /* now prepare goal request */
3253
3254	/* XXX: is it better to align blocks WRT logical
3255	 * placement or to satisfy a big request as is */
3256 ac->ac_g_ex.fe_logical = start;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003257 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
Alex Tomasc9de5602008-01-29 00:19:52 -05003258
3259 /* define goal start in order to merge */
3260 if (ar->pright && (ar->lright == (start + size))) {
3261 /* merge to the right */
3262 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3263 &ac->ac_f_ex.fe_group,
3264 &ac->ac_f_ex.fe_start);
3265 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3266 }
3267 if (ar->pleft && (ar->lleft + 1 == start)) {
3268 /* merge to the left */
3269 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3270 &ac->ac_f_ex.fe_group,
3271 &ac->ac_f_ex.fe_start);
3272 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3273 }
3274
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003275 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
Alex Tomasc9de5602008-01-29 00:19:52 -05003276 (unsigned) orig_size, (unsigned) start);
3277}
3278
3279static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3280{
3281 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3282
3283 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3284 atomic_inc(&sbi->s_bal_reqs);
3285 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
Curt Wohlgemuth291dae42010-05-16 16:00:00 -04003286 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
Alex Tomasc9de5602008-01-29 00:19:52 -05003287 atomic_inc(&sbi->s_bal_success);
3288 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3289 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3290 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3291 atomic_inc(&sbi->s_bal_goals);
3292 if (ac->ac_found > sbi->s_mb_max_to_scan)
3293 atomic_inc(&sbi->s_bal_breaks);
3294 }
3295
Theodore Ts'o296c3552009-09-30 00:32:42 -04003296 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3297 trace_ext4_mballoc_alloc(ac);
3298 else
3299 trace_ext4_mballoc_prealloc(ac);
Alex Tomasc9de5602008-01-29 00:19:52 -05003300}
3301
3302/*
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003303 * Called on failure; free up any blocks from the inode PA for this
3304 * context. We don't need this for MB_GROUP_PA because we only change
3305 * pa_free in ext4_mb_release_context(), but on failure, we've already
3306 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3307 */
3308static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3309{
3310 struct ext4_prealloc_space *pa = ac->ac_pa;
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003311 struct ext4_buddy e4b;
3312 int err;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003313
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003314 if (pa == NULL) {
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04003315 if (ac->ac_f_ex.fe_len == 0)
3316 return;
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003317 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3318 if (err) {
3319 /*
3320 * This should never happen since we pin the
3321 * pages in the ext4_allocation_context so
3322 * ext4_mb_load_buddy() should never fail.
3323 */
3324 WARN(1, "mb_load_buddy failed (%d)", err);
3325 return;
3326 }
3327 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3328 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3329 ac->ac_f_ex.fe_len);
3330 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04003331 ext4_mb_unload_buddy(&e4b);
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003332 return;
3333 }
3334 if (pa->pa_type == MB_INODE_PA)
Zheng Liu400db9d2012-05-28 17:53:53 -04003335 pa->pa_free += ac->ac_b_ex.fe_len;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003336}
3337
3338/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003339 * use blocks preallocated to inode
3340 */
3341static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3342 struct ext4_prealloc_space *pa)
3343{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003344 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003345 ext4_fsblk_t start;
3346 ext4_fsblk_t end;
3347 int len;
3348
3349 /* found preallocated blocks, use them */
3350 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003351 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3352 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3353 len = EXT4_NUM_B2C(sbi, end - start);
Alex Tomasc9de5602008-01-29 00:19:52 -05003354 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3355 &ac->ac_b_ex.fe_start);
3356 ac->ac_b_ex.fe_len = len;
3357 ac->ac_status = AC_STATUS_FOUND;
3358 ac->ac_pa = pa;
3359
3360 BUG_ON(start < pa->pa_pstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003361 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
Alex Tomasc9de5602008-01-29 00:19:52 -05003362 BUG_ON(pa->pa_free < len);
3363 pa->pa_free -= len;
3364
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003365 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003366}
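/*
 * Editor's illustration (assuming one block per cluster): with a PA of
 * pa_lstart = 100, pa_pstart = 1000, pa_len = 16 and a request for 4
 * blocks at logical block 104, start = 1000 + (104 - 100) = 1004 and
 * end = min(1016, 1008) = 1008, so 4 blocks are carved out of the PA
 * and pa_free drops from 16 to 12.
 */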
3367
3368/*
3369 * use blocks preallocated to locality group
3370 */
3371static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3372 struct ext4_prealloc_space *pa)
3373{
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04003374 unsigned int len = ac->ac_o_ex.fe_len;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003375
Alex Tomasc9de5602008-01-29 00:19:52 -05003376 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3377 &ac->ac_b_ex.fe_group,
3378 &ac->ac_b_ex.fe_start);
3379 ac->ac_b_ex.fe_len = len;
3380 ac->ac_status = AC_STATUS_FOUND;
3381 ac->ac_pa = pa;
3382
3383	/* we don't correct pa_pstart or pa_len here to avoid a
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003384	 * possible race when the group is being loaded concurrently;
Alex Tomasc9de5602008-01-29 00:19:52 -05003385	 * instead we correct the pa later, after blocks are marked
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003386	 * in the on-disk bitmap -- see ext4_mb_release_context().
3387	 * Other CPUs are prevented from allocating from this pa by lg_mutex
Alex Tomasc9de5602008-01-29 00:19:52 -05003388 */
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003389 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003390}
3391
3392/*
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003393 * Return the prealloc space that has the minimal distance
3394 * from the goal block. @cpa is the prealloc
3395 * space with the currently known minimal distance
3396 * from the goal block.
3397 */
3398static struct ext4_prealloc_space *
3399ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3400 struct ext4_prealloc_space *pa,
3401 struct ext4_prealloc_space *cpa)
3402{
3403 ext4_fsblk_t cur_distance, new_distance;
3404
3405 if (cpa == NULL) {
3406 atomic_inc(&pa->pa_count);
3407 return pa;
3408 }
Andrew Morton79211c82015-11-09 14:58:13 -08003409 cur_distance = abs(goal_block - cpa->pa_pstart);
3410 new_distance = abs(goal_block - pa->pa_pstart);
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003411
Coly Li5a54b2f2011-02-24 14:10:05 -05003412 if (cur_distance <= new_distance)
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003413 return cpa;
3414
3415 /* drop the previous reference */
3416 atomic_dec(&cpa->pa_count);
3417 atomic_inc(&pa->pa_count);
3418 return pa;
3419}
3420
3421/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003422 * search goal blocks in preallocated space
3423 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003424static noinline_for_stack int
3425ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003426{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003427 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003428 int order, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05003429 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3430 struct ext4_locality_group *lg;
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003431 struct ext4_prealloc_space *pa, *cpa = NULL;
3432 ext4_fsblk_t goal_block;
Alex Tomasc9de5602008-01-29 00:19:52 -05003433
3434 /* only data can be preallocated */
3435 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3436 return 0;
3437
3438 /* first, try per-file preallocation */
3439 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003440 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003441
3442		/* none of the fields in this condition change,
3443 * so we can skip locking for them */
3444 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003445 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3446 EXT4_C2B(sbi, pa->pa_len)))
Alex Tomasc9de5602008-01-29 00:19:52 -05003447 continue;
3448
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003449 /* non-extent files can't have physical blocks past 2^32 */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04003450 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003451 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3452 EXT4_MAX_BLOCK_FILE_PHYS))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003453 continue;
3454
Alex Tomasc9de5602008-01-29 00:19:52 -05003455 /* found preallocated blocks, use them */
3456 spin_lock(&pa->pa_lock);
3457 if (pa->pa_deleted == 0 && pa->pa_free) {
3458 atomic_inc(&pa->pa_count);
3459 ext4_mb_use_inode_pa(ac, pa);
3460 spin_unlock(&pa->pa_lock);
3461 ac->ac_criteria = 10;
3462 rcu_read_unlock();
3463 return 1;
3464 }
3465 spin_unlock(&pa->pa_lock);
3466 }
3467 rcu_read_unlock();
3468
3469 /* can we use group allocation? */
3470 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3471 return 0;
3472
3473 /* inode may have no locality group for some reason */
3474 lg = ac->ac_lg;
3475 if (lg == NULL)
3476 return 0;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003477 order = fls(ac->ac_o_ex.fe_len) - 1;
3478 if (order > PREALLOC_TB_SIZE - 1)
3479 /* The max size of hash table is PREALLOC_TB_SIZE */
3480 order = PREALLOC_TB_SIZE - 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003481
Akinobu Mitabda00de2010-03-03 23:53:25 -05003482 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003483 /*
3484 * search for the prealloc space that is having
3485 * minimal distance from the goal block.
3486 */
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003487 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3488 rcu_read_lock();
3489 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3490 pa_inode_list) {
3491 spin_lock(&pa->pa_lock);
3492 if (pa->pa_deleted == 0 &&
3493 pa->pa_free >= ac->ac_o_ex.fe_len) {
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003494
3495 cpa = ext4_mb_check_group_pa(goal_block,
3496 pa, cpa);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003497 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003498 spin_unlock(&pa->pa_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05003499 }
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003500 rcu_read_unlock();
Alex Tomasc9de5602008-01-29 00:19:52 -05003501 }
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003502 if (cpa) {
3503 ext4_mb_use_group_pa(ac, cpa);
3504 ac->ac_criteria = 20;
3505 return 1;
3506 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003507 return 0;
3508}
3509
3510/*
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003511 * the function goes through all blocks freed in the group
3512 * but not yet committed and marks them used in the in-core bitmap.
3513 * the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003514 * Needs to be called with the ext4 group lock held
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003515 */
3516static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3517 ext4_group_t group)
3518{
3519 struct rb_node *n;
3520 struct ext4_group_info *grp;
3521 struct ext4_free_data *entry;
3522
3523 grp = ext4_get_group_info(sb, group);
3524 n = rb_first(&(grp->bb_free_root));
3525
3526 while (n) {
Bobi Jam18aadd42012-02-20 17:53:02 -05003527 entry = rb_entry(n, struct ext4_free_data, efd_node);
3528 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003529 n = rb_next(n);
3530 }
3531 return;
3532}
3533
3534/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003535 * the function goes through all preallocations in this group and marks them
3536 * used in the in-core bitmap. the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003537 * Needs to be called with the ext4 group lock held
Alex Tomasc9de5602008-01-29 00:19:52 -05003538 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04003539static noinline_for_stack
3540void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
Alex Tomasc9de5602008-01-29 00:19:52 -05003541 ext4_group_t group)
3542{
3543 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3544 struct ext4_prealloc_space *pa;
3545 struct list_head *cur;
3546 ext4_group_t groupnr;
3547 ext4_grpblk_t start;
3548 int preallocated = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003549 int len;
3550
3551	/* all forms of preallocation discard first load the group,
3552	 * so the only competing code is preallocation use.
3553	 * we don't need any locking here.
3554	 * notice we do NOT ignore preallocations with pa_deleted set;
3555	 * otherwise we could leave used blocks available for
3556	 * allocation in the buddy when a concurrent ext4_mb_put_pa()
3557	 * is dropping the preallocation
3558 */
3559 list_for_each(cur, &grp->bb_prealloc_list) {
3560 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3561 spin_lock(&pa->pa_lock);
3562 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3563 &groupnr, &start);
3564 len = pa->pa_len;
3565 spin_unlock(&pa->pa_lock);
3566 if (unlikely(len == 0))
3567 continue;
3568 BUG_ON(groupnr != group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04003569 ext4_set_bits(bitmap, start, len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003570 preallocated += len;
Alex Tomasc9de5602008-01-29 00:19:52 -05003571 }
Colin Ian Kingff950152017-07-06 15:28:45 -04003572 mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003573}
3574
3575static void ext4_mb_pa_callback(struct rcu_head *head)
3576{
3577 struct ext4_prealloc_space *pa;
3578 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
Junho Ryu4e8d2132013-12-03 18:10:28 -05003579
3580 BUG_ON(atomic_read(&pa->pa_count));
3581 BUG_ON(pa->pa_deleted == 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05003582 kmem_cache_free(ext4_pspace_cachep, pa);
3583}
3584
3585/*
3586 * drops a reference to preallocated space descriptor
3587 * if this was the last reference and the space is consumed
3588 */
3589static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3590 struct super_block *sb, struct ext4_prealloc_space *pa)
3591{
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05003592 ext4_group_t grp;
Eric Sandeend33a1972009-03-16 23:25:40 -04003593 ext4_fsblk_t grp_blk;
Alex Tomasc9de5602008-01-29 00:19:52 -05003594
Alex Tomasc9de5602008-01-29 00:19:52 -05003595 /* in this short window concurrent discard can set pa_deleted */
3596 spin_lock(&pa->pa_lock);
Junho Ryu4e8d2132013-12-03 18:10:28 -05003597 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3598 spin_unlock(&pa->pa_lock);
3599 return;
3600 }
3601
Alex Tomasc9de5602008-01-29 00:19:52 -05003602 if (pa->pa_deleted == 1) {
3603 spin_unlock(&pa->pa_lock);
3604 return;
3605 }
3606
3607 pa->pa_deleted = 1;
3608 spin_unlock(&pa->pa_lock);
3609
Eric Sandeend33a1972009-03-16 23:25:40 -04003610 grp_blk = pa->pa_pstart;
Theodore Ts'o60e66792010-05-17 07:00:00 -04003611 /*
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003612 * If doing group-based preallocation, pa_pstart may be in the
3613 * next group when pa is used up
3614 */
3615 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeend33a1972009-03-16 23:25:40 -04003616 grp_blk--;
3617
Lukas Czernerbd862982013-04-03 23:32:34 -04003618 grp = ext4_get_group_number(sb, grp_blk);
Alex Tomasc9de5602008-01-29 00:19:52 -05003619
3620 /*
3621 * possible race:
3622 *
3623 * P1 (buddy init) P2 (regular allocation)
3624 * find block B in PA
3625 * copy on-disk bitmap to buddy
3626 * mark B in on-disk bitmap
3627 * drop PA from group
3628 * mark all PAs in buddy
3629 *
3630 * thus, P1 initializes buddy with B available. to prevent this
3631 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3632 * against that pair
3633 */
3634 ext4_lock_group(sb, grp);
3635 list_del(&pa->pa_group_list);
3636 ext4_unlock_group(sb, grp);
3637
3638 spin_lock(pa->pa_obj_lock);
3639 list_del_rcu(&pa->pa_inode_list);
3640 spin_unlock(pa->pa_obj_lock);
3641
3642 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3643}
3644
3645/*
3646 * creates new preallocated space for given inode
3647 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003648static noinline_for_stack int
3649ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003650{
3651 struct super_block *sb = ac->ac_sb;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003652 struct ext4_sb_info *sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003653 struct ext4_prealloc_space *pa;
3654 struct ext4_group_info *grp;
3655 struct ext4_inode_info *ei;
3656
3657	/* preallocate only when the found space is larger than requested */
3658 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3659 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3660 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3661
3662 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3663 if (pa == NULL)
3664 return -ENOMEM;
3665
3666 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3667 int winl;
3668 int wins;
3669 int win;
3670 int offs;
3671
3672		/* we can't allocate as much as the normalizer wants,
3673		 * so the found space must get a proper lstart
3674		 * to cover the original request */
3675 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3676 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3677
3678		/* we're limited by the original request in that
3679		 * the logical block must be covered anyway;
3680		 * winl is the window we can move our chunk within */
3681 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3682
3683		/* also, we should cover the whole original request */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003684 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003685
3686		/* the smallest one defines the real window */
3687 win = min(winl, wins);
3688
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003689 offs = ac->ac_o_ex.fe_logical %
3690 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003691 if (offs && offs < win)
3692 win = offs;
3693
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003694 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
Lukas Czerner810da242013-03-02 17:18:58 -05003695 EXT4_NUM_B2C(sbi, win);
Alex Tomasc9de5602008-01-29 00:19:52 -05003696 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3697 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3698 }
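	/*
	 * Worked example (editor's note, one block per cluster): if the
	 * goal started at logical 100, the original request is 2 blocks at
	 * logical 110, and only 8 blocks were found, then winl = 10,
	 * wins = 8 - 2 = 6 and offs = 110 % 8 = 6, so win = 6 and the PA is
	 * placed at logical 104, still covering blocks 110-111.
	 */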
3699
3700	/* preallocation can change ac_b_ex, thus we store the actually
3701 * allocated blocks for history */
3702 ac->ac_f_ex = ac->ac_b_ex;
3703
3704 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3705 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3706 pa->pa_len = ac->ac_b_ex.fe_len;
3707 pa->pa_free = pa->pa_len;
3708 atomic_set(&pa->pa_count, 1);
3709 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003710 INIT_LIST_HEAD(&pa->pa_inode_list);
3711 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003712 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003713 pa->pa_type = MB_INODE_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003714
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003715 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
Alex Tomasc9de5602008-01-29 00:19:52 -05003716 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003717 trace_ext4_mb_new_inode_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003718
3719 ext4_mb_use_inode_pa(ac, pa);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003720 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
Alex Tomasc9de5602008-01-29 00:19:52 -05003721
3722 ei = EXT4_I(ac->ac_inode);
3723 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3724
3725 pa->pa_obj_lock = &ei->i_prealloc_lock;
3726 pa->pa_inode = ac->ac_inode;
3727
3728 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3729 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3730 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3731
3732 spin_lock(pa->pa_obj_lock);
3733 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3734 spin_unlock(pa->pa_obj_lock);
3735
3736 return 0;
3737}
3738
3739/*
3740 * creates new preallocated space for the locality group the inode belongs to
3741 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003742static noinline_for_stack int
3743ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003744{
3745 struct super_block *sb = ac->ac_sb;
3746 struct ext4_locality_group *lg;
3747 struct ext4_prealloc_space *pa;
3748 struct ext4_group_info *grp;
3749
3750	/* preallocate only when the found space is larger than requested */
3751 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3752 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3753 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3754
3755 BUG_ON(ext4_pspace_cachep == NULL);
3756 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3757 if (pa == NULL)
3758 return -ENOMEM;
3759
3760	/* preallocation can change ac_b_ex, thus we store the actually
3761 * allocated blocks for history */
3762 ac->ac_f_ex = ac->ac_b_ex;
3763
3764 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3765 pa->pa_lstart = pa->pa_pstart;
3766 pa->pa_len = ac->ac_b_ex.fe_len;
3767 pa->pa_free = pa->pa_len;
3768 atomic_set(&pa->pa_count, 1);
3769 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003770 INIT_LIST_HEAD(&pa->pa_inode_list);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003771 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003772 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003773 pa->pa_type = MB_GROUP_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003774
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003775 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003776 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3777 trace_ext4_mb_new_group_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003778
3779 ext4_mb_use_group_pa(ac, pa);
3780 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3781
3782 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3783 lg = ac->ac_lg;
3784 BUG_ON(lg == NULL);
3785
3786 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3787 pa->pa_inode = NULL;
3788
3789 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3790 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3791 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3792
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003793 /*
3794 * We will later add the new pa to the right bucket
3795 * after updating the pa_free in ext4_mb_release_context
3796 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003797 return 0;
3798}
3799
3800static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3801{
3802 int err;
3803
3804 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3805 err = ext4_mb_new_group_pa(ac);
3806 else
3807 err = ext4_mb_new_inode_pa(ac);
3808 return err;
3809}
3810
3811/*
3812 * finds all unused blocks in on-disk bitmap, frees them in
3813 * in-core bitmap and buddy.
3814 * @pa must be unlinked from inode and group lists, so that
3815 * nobody else can find/use it.
3816 * the caller MUST hold group/inode locks.
3817 * TODO: optimize the case when there are no in-core structures yet
3818 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003819static noinline_for_stack int
3820ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003821 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003822{
Alex Tomasc9de5602008-01-29 00:19:52 -05003823 struct super_block *sb = e4b->bd_sb;
3824 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003825 unsigned int end;
3826 unsigned int next;
Alex Tomasc9de5602008-01-29 00:19:52 -05003827 ext4_group_t group;
3828 ext4_grpblk_t bit;
Theodore Ts'oba80b102009-01-03 20:03:21 -05003829 unsigned long long grp_blk_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05003830 int err = 0;
3831 int free = 0;
3832
3833 BUG_ON(pa->pa_deleted == 0);
3834 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003835 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003836 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3837 end = bit + pa->pa_len;
3838
Alex Tomasc9de5602008-01-29 00:19:52 -05003839 while (bit < end) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003840 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003841 if (bit >= end)
3842 break;
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003843 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003844 mb_debug(1, " free preallocated %u/%u in group %u\n",
Andi Kleen5a0790c2010-06-14 13:28:03 -04003845 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3846 (unsigned) next - bit, (unsigned) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003847 free += next - bit;
3848
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003849 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003850 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3851 EXT4_C2B(sbi, bit)),
Lukas Czernera9c667f2011-06-06 09:51:52 -04003852 next - bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003853 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3854 bit = next + 1;
3855 }
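	/*
	 * Editor's illustration: if the PA covers bits 100-115 of the group
	 * and bits 104-107 are in use in the on-disk bitmap, the loop above
	 * frees two extents, 100-103 and 108-115, and free ends up as 12.
	 */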
3856 if (free != pa->pa_free) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003857 ext4_msg(e4b->bd_sb, KERN_CRIT,
3858 "pa %p: logic %lu, phys. %lu, len %lu",
3859 pa, (unsigned long) pa->pa_lstart,
3860 (unsigned long) pa->pa_pstart,
3861 (unsigned long) pa->pa_len);
Theodore Ts'oe29136f2010-06-29 12:54:28 -04003862 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
Aneesh Kumar K.V5d1b1b32009-01-05 22:19:52 -05003863 free, pa->pa_free);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05003864 /*
3865 * pa is already deleted so we use the value obtained
3866 * from the bitmap and continue.
3867 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003868 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003869 atomic_add(free, &sbi->s_mb_discarded);
3870
3871 return err;
3872}
3873
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003874static noinline_for_stack int
3875ext4_mb_release_group_pa(struct ext4_buddy *e4b,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003876 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003877{
Alex Tomasc9de5602008-01-29 00:19:52 -05003878 struct super_block *sb = e4b->bd_sb;
3879 ext4_group_t group;
3880 ext4_grpblk_t bit;
3881
Yongqiang Yang60e07cf2011-12-18 15:49:54 -05003882 trace_ext4_mb_release_group_pa(sb, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003883 BUG_ON(pa->pa_deleted == 0);
3884 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3885 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3886 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3887 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003888 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003889
3890 return 0;
3891}
3892
3893/*
3894 * releases all preallocations in given group
3895 *
3896 * first, we need to decide discard policy:
3897 * - when do we discard
3898 * 1) ENOSPC
3899 * - how many do we discard
3900 * 1) how many requested
3901 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003902static noinline_for_stack int
3903ext4_mb_discard_group_preallocations(struct super_block *sb,
Alex Tomasc9de5602008-01-29 00:19:52 -05003904 ext4_group_t group, int needed)
3905{
3906 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3907 struct buffer_head *bitmap_bh = NULL;
3908 struct ext4_prealloc_space *pa, *tmp;
3909 struct list_head list;
3910 struct ext4_buddy e4b;
3911 int err;
3912 int busy = 0;
3913 int free = 0;
3914
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003915 mb_debug(1, "discard preallocation for group %u\n", group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003916
3917 if (list_empty(&grp->bb_prealloc_list))
3918 return 0;
3919
Theodore Ts'o574ca172008-07-11 19:27:31 -04003920 bitmap_bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04003921 if (IS_ERR(bitmap_bh)) {
3922 err = PTR_ERR(bitmap_bh);
3923 ext4_error(sb, "Error %d reading block bitmap for %u",
3924 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003925 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003926 }
3927
3928 err = ext4_mb_load_buddy(sb, group, &e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003929 if (err) {
Konstantin Khlebnikov9651e6b2017-05-21 22:35:23 -04003930 ext4_warning(sb, "Error %d loading buddy information for %u",
3931 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003932 put_bh(bitmap_bh);
3933 return 0;
3934 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003935
3936 if (needed == 0)
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04003937 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003938
Alex Tomasc9de5602008-01-29 00:19:52 -05003939 INIT_LIST_HEAD(&list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003940repeat:
3941 ext4_lock_group(sb, group);
3942 list_for_each_entry_safe(pa, tmp,
3943 &grp->bb_prealloc_list, pa_group_list) {
3944 spin_lock(&pa->pa_lock);
3945 if (atomic_read(&pa->pa_count)) {
3946 spin_unlock(&pa->pa_lock);
3947 busy = 1;
3948 continue;
3949 }
3950 if (pa->pa_deleted) {
3951 spin_unlock(&pa->pa_lock);
3952 continue;
3953 }
3954
3955 /* seems this one can be freed ... */
3956 pa->pa_deleted = 1;
3957
3958 /* we can trust pa_free ... */
3959 free += pa->pa_free;
3960
3961 spin_unlock(&pa->pa_lock);
3962
3963 list_del(&pa->pa_group_list);
3964 list_add(&pa->u.pa_tmp_list, &list);
3965 }
3966
3967 /* if we still need more blocks and some PAs were used, try again */
3968 if (free < needed && busy) {
3969 busy = 0;
3970 ext4_unlock_group(sb, group);
Lukas Czernerbb8b20e2013-03-10 22:28:09 -04003971 cond_resched();
Alex Tomasc9de5602008-01-29 00:19:52 -05003972 goto repeat;
3973 }
3974
3975 /* found anything to free? */
3976 if (list_empty(&list)) {
3977 BUG_ON(free != 0);
3978 goto out;
3979 }
3980
3981 /* now free all selected PAs */
3982 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3983
3984 /* remove from object (inode or locality group) */
3985 spin_lock(pa->pa_obj_lock);
3986 list_del_rcu(&pa->pa_inode_list);
3987 spin_unlock(pa->pa_obj_lock);
3988
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003989 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003990 ext4_mb_release_group_pa(&e4b, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003991 else
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003992 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003993
3994 list_del(&pa->u.pa_tmp_list);
3995 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3996 }
3997
3998out:
3999 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04004000 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05004001 put_bh(bitmap_bh);
4002 return free;
4003}
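/*
 * Illustrative caller-side sketch (an assumption for clarity, not code
 * from this file): under ENOSPC pressure the per-group helper above is
 * driven in a loop over all groups until enough clusters come back:
 *
 *	int needed = ac->ac_o_ex.fe_len;	// clusters still missing
 *	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
 *	for (i = 0; i < ngroups && needed > 0; i++)
 *		needed -= ext4_mb_discard_group_preallocations(sb, i, needed);
 *
 * This is the same shape as ext4_mb_discard_preallocations() further
 * down; the helper returns the number of clusters actually freed, which
 * can be zero when every PA in the group was busy (pa_count != 0).
 */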
4004
4005/*
 4006 * releases all unused preallocated blocks for a given inode
4007 *
4008 * It's important to discard preallocations under i_data_sem
4009 * We don't want another block to be served from the prealloc
4010 * space when we are discarding the inode prealloc space.
4011 *
4012 * FIXME!! Make sure it is valid at all the call sites
4013 */
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04004014void ext4_discard_preallocations(struct inode *inode)
Alex Tomasc9de5602008-01-29 00:19:52 -05004015{
4016 struct ext4_inode_info *ei = EXT4_I(inode);
4017 struct super_block *sb = inode->i_sb;
4018 struct buffer_head *bitmap_bh = NULL;
4019 struct ext4_prealloc_space *pa, *tmp;
4020 ext4_group_t group = 0;
4021 struct list_head list;
4022 struct ext4_buddy e4b;
4023 int err;
4024
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04004025 if (!S_ISREG(inode->i_mode)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004026 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
4027 return;
4028 }
4029
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004030 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004031 trace_ext4_discard_preallocations(inode);
Alex Tomasc9de5602008-01-29 00:19:52 -05004032
4033 INIT_LIST_HEAD(&list);
4034
4035repeat:
4036 /* first, collect all pa's in the inode */
4037 spin_lock(&ei->i_prealloc_lock);
4038 while (!list_empty(&ei->i_prealloc_list)) {
4039 pa = list_entry(ei->i_prealloc_list.next,
4040 struct ext4_prealloc_space, pa_inode_list);
4041 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
4042 spin_lock(&pa->pa_lock);
4043 if (atomic_read(&pa->pa_count)) {
4044 /* this shouldn't happen often - nobody should
4045 * use preallocation while we're discarding it */
4046 spin_unlock(&pa->pa_lock);
4047 spin_unlock(&ei->i_prealloc_lock);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04004048 ext4_msg(sb, KERN_ERR,
4049 "uh-oh! used pa while discarding");
Alex Tomasc9de5602008-01-29 00:19:52 -05004050 WARN_ON(1);
4051 schedule_timeout_uninterruptible(HZ);
4052 goto repeat;
4053
4054 }
4055 if (pa->pa_deleted == 0) {
4056 pa->pa_deleted = 1;
4057 spin_unlock(&pa->pa_lock);
4058 list_del_rcu(&pa->pa_inode_list);
4059 list_add(&pa->u.pa_tmp_list, &list);
4060 continue;
4061 }
4062
4063 /* someone is deleting pa right now */
4064 spin_unlock(&pa->pa_lock);
4065 spin_unlock(&ei->i_prealloc_lock);
4066
4067 /* we have to wait here because pa_deleted
4068 * doesn't mean pa is already unlinked from
 4069			 * the list. As we might be called from
 4070			 * ->clear_inode(), the inode will get freed
 4071			 * and a concurrent thread which is unlinking
 4072			 * pa from the inode's list may access already
 4073			 * freed memory, bad-bad-bad */
4074
4075 /* XXX: if this happens too often, we can
4076 * add a flag to force wait only in case
4077 * of ->clear_inode(), but not in case of
4078 * regular truncate */
4079 schedule_timeout_uninterruptible(HZ);
4080 goto repeat;
4081 }
4082 spin_unlock(&ei->i_prealloc_lock);
4083
4084 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004085 BUG_ON(pa->pa_type != MB_INODE_PA);
Lukas Czernerbd862982013-04-03 23:32:34 -04004086 group = ext4_get_group_number(sb, pa->pa_pstart);
Alex Tomasc9de5602008-01-29 00:19:52 -05004087
Konstantin Khlebnikov9651e6b2017-05-21 22:35:23 -04004088 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4089 GFP_NOFS|__GFP_NOFAIL);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004090 if (err) {
Konstantin Khlebnikov9651e6b2017-05-21 22:35:23 -04004091 ext4_error(sb, "Error %d loading buddy information for %u",
4092 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004093 continue;
4094 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004095
Theodore Ts'o574ca172008-07-11 19:27:31 -04004096 bitmap_bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04004097 if (IS_ERR(bitmap_bh)) {
4098 err = PTR_ERR(bitmap_bh);
4099 ext4_error(sb, "Error %d reading block bitmap for %u",
4100 err, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04004101 ext4_mb_unload_buddy(&e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004102 continue;
Alex Tomasc9de5602008-01-29 00:19:52 -05004103 }
4104
4105 ext4_lock_group(sb, group);
4106 list_del(&pa->pa_group_list);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04004107 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05004108 ext4_unlock_group(sb, group);
4109
Jing Zhange39e07f2010-05-14 00:00:00 -04004110 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05004111 put_bh(bitmap_bh);
4112
4113 list_del(&pa->u.pa_tmp_list);
4114 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4115 }
4116}
4117
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004118#ifdef CONFIG_EXT4_DEBUG
Alex Tomasc9de5602008-01-29 00:19:52 -05004119static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4120{
4121 struct super_block *sb = ac->ac_sb;
Theodore Ts'o8df96752009-05-01 08:50:38 -04004122 ext4_group_t ngroups, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05004123
Theodore Ts'oa0b30c12013-02-09 16:28:20 -05004124 if (!ext4_mballoc_debug ||
Theodore Ts'o4dd89fc2011-02-27 17:23:47 -05004125 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
Eric Sandeene3570632010-07-27 11:56:08 -04004126 return;
4127
Joe Perches7f6a11e2012-03-19 23:09:43 -04004128 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04004129 " Allocation context details:");
Joe Perches7f6a11e2012-03-19 23:09:43 -04004130 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05004131 ac->ac_status, ac->ac_flags);
Joe Perches7f6a11e2012-03-19 23:09:43 -04004132 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04004133 "goal %lu/%lu/%lu@%lu, "
4134 "best %lu/%lu/%lu@%lu cr %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05004135 (unsigned long)ac->ac_o_ex.fe_group,
4136 (unsigned long)ac->ac_o_ex.fe_start,
4137 (unsigned long)ac->ac_o_ex.fe_len,
4138 (unsigned long)ac->ac_o_ex.fe_logical,
4139 (unsigned long)ac->ac_g_ex.fe_group,
4140 (unsigned long)ac->ac_g_ex.fe_start,
4141 (unsigned long)ac->ac_g_ex.fe_len,
4142 (unsigned long)ac->ac_g_ex.fe_logical,
4143 (unsigned long)ac->ac_b_ex.fe_group,
4144 (unsigned long)ac->ac_b_ex.fe_start,
4145 (unsigned long)ac->ac_b_ex.fe_len,
4146 (unsigned long)ac->ac_b_ex.fe_logical,
4147 (int)ac->ac_criteria);
Eric Sandeendc9ddd92014-02-20 13:32:10 -05004148 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
Joe Perches7f6a11e2012-03-19 23:09:43 -04004149 ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
Theodore Ts'o8df96752009-05-01 08:50:38 -04004150 ngroups = ext4_get_groups_count(sb);
4151 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004152 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4153 struct ext4_prealloc_space *pa;
4154 ext4_grpblk_t start;
4155 struct list_head *cur;
4156 ext4_lock_group(sb, i);
4157 list_for_each(cur, &grp->bb_prealloc_list) {
4158 pa = list_entry(cur, struct ext4_prealloc_space,
4159 pa_group_list);
4160 spin_lock(&pa->pa_lock);
4161 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4162 NULL, &start);
4163 spin_unlock(&pa->pa_lock);
Akira Fujita1c718502009-07-05 23:04:36 -04004164 printk(KERN_ERR "PA:%u:%d:%u \n", i,
4165 start, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05004166 }
Solofo Ramangalahy60bd63d2008-04-29 21:59:59 -04004167 ext4_unlock_group(sb, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05004168
4169 if (grp->bb_free == 0)
4170 continue;
Akira Fujita1c718502009-07-05 23:04:36 -04004171 printk(KERN_ERR "%u: %d/%d \n",
Alex Tomasc9de5602008-01-29 00:19:52 -05004172 i, grp->bb_free, grp->bb_fragments);
4173 }
4174 printk(KERN_ERR "\n");
4175}
4176#else
4177static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4178{
4179 return;
4180}
4181#endif
4182
4183/*
4184 * We use locality group preallocation for small size file. The size of the
4185 * file is determined by the current size or the resulting size after
4186 * allocation which ever is larger
4187 *
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04004188 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
Alex Tomasc9de5602008-01-29 00:19:52 -05004189 */
4190static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4191{
4192 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4193 int bsbits = ac->ac_sb->s_blocksize_bits;
4194 loff_t size, isize;
4195
4196 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4197 return;
4198
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004199 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4200 return;
4201
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004202 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Theodore Ts'o50797482009-09-18 13:34:02 -04004203 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4204 >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05004205
Theodore Ts'o50797482009-09-18 13:34:02 -04004206 if ((size == isize) &&
4207 !ext4_fs_is_busy(sbi) &&
4208 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4209 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4210 return;
4211 }
4212
Robin Dongebbe0272011-10-26 05:14:27 -04004213 if (sbi->s_mb_group_prealloc <= 0) {
4214 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4215 return;
4216 }
4217
Alex Tomasc9de5602008-01-29 00:19:52 -05004218 /* don't use group allocation for large files */
Theodore Ts'o71780572009-09-28 00:06:20 -04004219 size = max(size, isize);
Tao Macc483f12010-03-01 19:06:35 -05004220 if (size > sbi->s_mb_stream_request) {
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004221 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
Alex Tomasc9de5602008-01-29 00:19:52 -05004222 return;
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004223 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004224
4225 BUG_ON(ac->ac_lg != NULL);
4226 /*
 4227	 * locality group prealloc space is per cpu. The reason for having
 4228	 * a per cpu locality group is to reduce the contention between block
 4229	 * requests from multiple CPUs.
4230 */
Christoph Lametera0b6bc62014-08-17 12:30:28 -05004231 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05004232
4233 /* we're going to use group allocation */
4234 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4235
4236 /* serialize all allocations in the group */
4237 mutex_lock(&ac->ac_lg->lg_mutex);
4238}
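/*
 * Worked example of the policy above (assumed geometry, for illustration
 * only): with 4KiB blocks (bsbits = 12) and the default s_mb_stream_request
 * of 16 blocks, a write leaving the file at 20KiB gives
 *
 *	isize = (20480 + 4095) >> 12 = 5 blocks
 *	size  = max(size, isize)    = 5 blocks
 *
 * Since 5 <= 16, the request stays below the stream threshold and is
 * served from the per-cpu locality group; a file crossing 64KiB
 * (16 blocks) would switch to stream (inode) allocation instead.
 */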
4239
Eric Sandeen4ddfef72008-04-29 08:11:12 -04004240static noinline_for_stack int
4241ext4_mb_initialize_context(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05004242 struct ext4_allocation_request *ar)
4243{
4244 struct super_block *sb = ar->inode->i_sb;
4245 struct ext4_sb_info *sbi = EXT4_SB(sb);
4246 struct ext4_super_block *es = sbi->s_es;
4247 ext4_group_t group;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05004248 unsigned int len;
4249 ext4_fsblk_t goal;
Alex Tomasc9de5602008-01-29 00:19:52 -05004250 ext4_grpblk_t block;
4251
4252 /* we can't allocate > group size */
4253 len = ar->len;
4254
4255 /* just a dirty hack to filter too big requests */
Theodore Ts'o40ae3482013-02-04 15:08:40 -05004256 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4257 len = EXT4_CLUSTERS_PER_GROUP(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004258
4259 /* start searching from the goal */
4260 goal = ar->goal;
4261 if (goal < le32_to_cpu(es->s_first_data_block) ||
4262 goal >= ext4_blocks_count(es))
4263 goal = le32_to_cpu(es->s_first_data_block);
4264 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4265
4266 /* set up allocation goals */
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004267 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
Alex Tomasc9de5602008-01-29 00:19:52 -05004268 ac->ac_status = AC_STATUS_CONTINUE;
Alex Tomasc9de5602008-01-29 00:19:52 -05004269 ac->ac_sb = sb;
4270 ac->ac_inode = ar->inode;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004271 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
Alex Tomasc9de5602008-01-29 00:19:52 -05004272 ac->ac_o_ex.fe_group = group;
4273 ac->ac_o_ex.fe_start = block;
4274 ac->ac_o_ex.fe_len = len;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004275 ac->ac_g_ex = ac->ac_o_ex;
Alex Tomasc9de5602008-01-29 00:19:52 -05004276 ac->ac_flags = ar->flags;
Alex Tomasc9de5602008-01-29 00:19:52 -05004277
4278 /* we have to define context: we'll we work with a file or
4279 * locality group. this is a policy, actually */
4280 ext4_mb_group_or_file(ac);
4281
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004282 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
Alex Tomasc9de5602008-01-29 00:19:52 -05004283 "left: %u/%u, right %u/%u to %swritable\n",
4284 (unsigned) ar->len, (unsigned) ar->logical,
4285 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4286 (unsigned) ar->lleft, (unsigned) ar->pleft,
4287 (unsigned) ar->lright, (unsigned) ar->pright,
4288 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4289 return 0;
4290
4291}
4292
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004293static noinline_for_stack void
4294ext4_mb_discard_lg_preallocations(struct super_block *sb,
4295 struct ext4_locality_group *lg,
4296 int order, int total_entries)
4297{
4298 ext4_group_t group = 0;
4299 struct ext4_buddy e4b;
4300 struct list_head discard_list;
4301 struct ext4_prealloc_space *pa, *tmp;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004302
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004303 mb_debug(1, "discard locality group preallocation\n");
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004304
4305 INIT_LIST_HEAD(&discard_list);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004306
4307 spin_lock(&lg->lg_prealloc_lock);
4308 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4309 pa_inode_list) {
4310 spin_lock(&pa->pa_lock);
4311 if (atomic_read(&pa->pa_count)) {
4312 /*
4313 * This is the pa that we just used
4314 * for block allocation. So don't
4315 * free that
4316 */
4317 spin_unlock(&pa->pa_lock);
4318 continue;
4319 }
4320 if (pa->pa_deleted) {
4321 spin_unlock(&pa->pa_lock);
4322 continue;
4323 }
4324 /* only lg prealloc space */
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004325 BUG_ON(pa->pa_type != MB_GROUP_PA);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004326
4327 /* seems this one can be freed ... */
4328 pa->pa_deleted = 1;
4329 spin_unlock(&pa->pa_lock);
4330
4331 list_del_rcu(&pa->pa_inode_list);
4332 list_add(&pa->u.pa_tmp_list, &discard_list);
4333
4334 total_entries--;
4335 if (total_entries <= 5) {
4336 /*
4337 * we want to keep only 5 entries
4338 * allowing it to grow to 8. This
 4339			 * makes sure we don't call discard
 4340			 * again too soon for this list.
4341 */
4342 break;
4343 }
4344 }
4345 spin_unlock(&lg->lg_prealloc_lock);
4346
4347 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
Konstantin Khlebnikov9651e6b2017-05-21 22:35:23 -04004348 int err;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004349
Lukas Czernerbd862982013-04-03 23:32:34 -04004350 group = ext4_get_group_number(sb, pa->pa_pstart);
Konstantin Khlebnikov9651e6b2017-05-21 22:35:23 -04004351 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4352 GFP_NOFS|__GFP_NOFAIL);
4353 if (err) {
4354 ext4_error(sb, "Error %d loading buddy information for %u",
4355 err, group);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004356 continue;
4357 }
4358 ext4_lock_group(sb, group);
4359 list_del(&pa->pa_group_list);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04004360 ext4_mb_release_group_pa(&e4b, pa);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004361 ext4_unlock_group(sb, group);
4362
Jing Zhange39e07f2010-05-14 00:00:00 -04004363 ext4_mb_unload_buddy(&e4b);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004364 list_del(&pa->u.pa_tmp_list);
4365 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4366 }
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004367}
4368
4369/*
4370 * We have incremented pa_count. So it cannot be freed at this
4371 * point. Also we hold lg_mutex. So no parallel allocation is
4372 * possible from this lg. That means pa_free cannot be updated.
4373 *
 4374 * A parallel ext4_mb_discard_group_preallocations is possible,
 4375 * which can cause the lg_prealloc_list to be updated.
4376 */
4377
4378static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4379{
4380 int order, added = 0, lg_prealloc_count = 1;
4381 struct super_block *sb = ac->ac_sb;
4382 struct ext4_locality_group *lg = ac->ac_lg;
4383 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4384
4385 order = fls(pa->pa_free) - 1;
4386 if (order > PREALLOC_TB_SIZE - 1)
4387 /* The max size of hash table is PREALLOC_TB_SIZE */
4388 order = PREALLOC_TB_SIZE - 1;
4389 /* Add the prealloc space to lg */
Niu Yaweif1167002013-02-01 21:31:27 -05004390 spin_lock(&lg->lg_prealloc_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004391 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4392 pa_inode_list) {
4393 spin_lock(&tmp_pa->pa_lock);
4394 if (tmp_pa->pa_deleted) {
Theodore Ts'oe7c9e3e2009-03-27 19:43:21 -04004395 spin_unlock(&tmp_pa->pa_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004396 continue;
4397 }
4398 if (!added && pa->pa_free < tmp_pa->pa_free) {
4399 /* Add to the tail of the previous entry */
4400 list_add_tail_rcu(&pa->pa_inode_list,
4401 &tmp_pa->pa_inode_list);
4402 added = 1;
4403 /*
4404 * we want to count the total
4405 * number of entries in the list
4406 */
4407 }
4408 spin_unlock(&tmp_pa->pa_lock);
4409 lg_prealloc_count++;
4410 }
4411 if (!added)
4412 list_add_tail_rcu(&pa->pa_inode_list,
4413 &lg->lg_prealloc_list[order]);
Niu Yaweif1167002013-02-01 21:31:27 -05004414 spin_unlock(&lg->lg_prealloc_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004415
4416 /* Now trim the list to be not more than 8 elements */
4417 if (lg_prealloc_count > 8) {
4418 ext4_mb_discard_lg_preallocations(sb, lg,
Niu Yaweif1167002013-02-01 21:31:27 -05004419 order, lg_prealloc_count);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004420 return;
4421 }
 4422	return;
4423}
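/*
 * Bucket arithmetic for the hash above (illustrative): a group PA with
 * pa_free = 12 clusters lands in order fls(12) - 1 = 3, i.e.
 * lg_prealloc_list[3], the bucket for PAs with 8..15 free clusters.
 * Each list is kept ordered by pa_free and is trimmed back towards 5
 * entries once it grows past 8, bounding the cost of later discards.
 */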
4424
Alex Tomasc9de5602008-01-29 00:19:52 -05004425/*
4426 * release all resource we used in allocation
4427 */
4428static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4429{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004430 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004431 struct ext4_prealloc_space *pa = ac->ac_pa;
4432 if (pa) {
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004433 if (pa->pa_type == MB_GROUP_PA) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004434 /* see comment in ext4_mb_use_group_pa() */
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004435 spin_lock(&pa->pa_lock);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004436 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4437 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004438 pa->pa_free -= ac->ac_b_ex.fe_len;
4439 pa->pa_len -= ac->ac_b_ex.fe_len;
4440 spin_unlock(&pa->pa_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05004441 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004442 }
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004443 if (pa) {
4444 /*
4445 * We want to add the pa to the right bucket.
4446 * Remove it from the list and while adding
4447 * make sure the list to which we are adding
Amir Goldstein44183d42011-05-09 21:52:36 -04004448 * doesn't grow big.
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004449 */
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004450 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004451 spin_lock(pa->pa_obj_lock);
4452 list_del_rcu(&pa->pa_inode_list);
4453 spin_unlock(pa->pa_obj_lock);
4454 ext4_mb_add_n_trim(ac);
4455 }
4456 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4457 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004458 if (ac->ac_bitmap_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004459 put_page(ac->ac_bitmap_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05004460 if (ac->ac_buddy_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004461 put_page(ac->ac_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05004462 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4463 mutex_unlock(&ac->ac_lg->lg_mutex);
4464 ext4_mb_collect_stats(ac);
4465 return 0;
4466}
4467
4468static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4469{
Theodore Ts'o8df96752009-05-01 08:50:38 -04004470 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004471 int ret;
4472 int freed = 0;
4473
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004474 trace_ext4_mb_discard_preallocations(sb, needed);
Theodore Ts'o8df96752009-05-01 08:50:38 -04004475 for (i = 0; i < ngroups && needed > 0; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004476 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4477 freed += ret;
4478 needed -= ret;
4479 }
4480
4481 return freed;
4482}
4483
4484/*
4485 * Main entry point into mballoc to allocate blocks
4486 * it tries to use preallocation first, then falls back
4487 * to usual allocation
4488 */
4489ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
Aditya Kali6c7a1202010-08-05 16:22:24 -04004490 struct ext4_allocation_request *ar, int *errp)
Alex Tomasc9de5602008-01-29 00:19:52 -05004491{
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04004492 int freed;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004493 struct ext4_allocation_context *ac = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004494 struct ext4_sb_info *sbi;
4495 struct super_block *sb;
4496 ext4_fsblk_t block = 0;
Mingming Cao60e58e02009-01-22 18:13:05 +01004497 unsigned int inquota = 0;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004498 unsigned int reserv_clstrs = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004499
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04004500 might_sleep();
Alex Tomasc9de5602008-01-29 00:19:52 -05004501 sb = ar->inode->i_sb;
4502 sbi = EXT4_SB(sb);
4503
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004504 trace_ext4_request_blocks(ar);
Theodore Ts'oba80b102009-01-03 20:03:21 -05004505
Dmitry Monakhov45dc63e2011-10-20 20:07:23 -04004506 /* Allow to use superuser reservation for quota file */
Tahsin Erdogan02749a42017-06-22 11:31:25 -04004507 if (ext4_is_quota_file(ar->inode))
Dmitry Monakhov45dc63e2011-10-20 20:07:23 -04004508 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4509
Theodore Ts'oe3cf5d52014-09-04 18:07:25 -04004510 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
Mingming Cao60e58e02009-01-22 18:13:05 +01004511 /* Without delayed allocation we need to verify
4512 * there is enough free blocks to do block allocation
4513 * and verify allocation doesn't exceed the quota limits.
Mingming Caod2a17632008-07-14 17:52:37 -04004514 */
Allison Henderson55f020d2011-05-25 07:41:26 -04004515 while (ar->len &&
Theodore Ts'oe7d5f312011-09-09 19:14:51 -04004516 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
Allison Henderson55f020d2011-05-25 07:41:26 -04004517
Aneesh Kumar K.V030ba6b2008-09-08 23:14:50 -04004518 /* let others to free the space */
Lukas Czernerbb8b20e2013-03-10 22:28:09 -04004519 cond_resched();
Aneesh Kumar K.V030ba6b2008-09-08 23:14:50 -04004520 ar->len = ar->len >> 1;
4521 }
4522 if (!ar->len) {
Aneesh Kumar K.Va30d542a2008-10-09 10:56:23 -04004523 *errp = -ENOSPC;
4524 return 0;
4525 }
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004526 reserv_clstrs = ar->len;
Allison Henderson55f020d2011-05-25 07:41:26 -04004527 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004528 dquot_alloc_block_nofail(ar->inode,
4529 EXT4_C2B(sbi, ar->len));
Allison Henderson55f020d2011-05-25 07:41:26 -04004530 } else {
4531 while (ar->len &&
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004532 dquot_alloc_block(ar->inode,
4533 EXT4_C2B(sbi, ar->len))) {
Allison Henderson55f020d2011-05-25 07:41:26 -04004534
4535 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4536 ar->len--;
4537 }
Mingming Cao60e58e02009-01-22 18:13:05 +01004538 }
4539 inquota = ar->len;
4540 if (ar->len == 0) {
4541 *errp = -EDQUOT;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004542 goto out;
Mingming Cao60e58e02009-01-22 18:13:05 +01004543 }
Mingming Caod2a17632008-07-14 17:52:37 -04004544 }
Mingming Caod2a17632008-07-14 17:52:37 -04004545
Wei Yongjun85556c92012-09-26 20:43:37 -04004546 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
Theodore Ts'o833576b2009-07-13 09:45:52 -04004547 if (!ac) {
Shen Feng363d4252008-07-11 19:27:31 -04004548 ar->len = 0;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004549 *errp = -ENOMEM;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004550 goto out;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004551 }
4552
Eric Sandeen256bdb42008-02-10 01:13:33 -05004553 *errp = ext4_mb_initialize_context(ac, ar);
Alex Tomasc9de5602008-01-29 00:19:52 -05004554 if (*errp) {
4555 ar->len = 0;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004556 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05004557 }
4558
Eric Sandeen256bdb42008-02-10 01:13:33 -05004559 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4560 if (!ext4_mb_use_preallocated(ac)) {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004561 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4562 ext4_mb_normalize_request(ac, ar);
Alex Tomasc9de5602008-01-29 00:19:52 -05004563repeat:
4564 /* allocate space in core */
Aditya Kali6c7a1202010-08-05 16:22:24 -04004565 *errp = ext4_mb_regular_allocator(ac);
Alexey Khoroshilov2c00ef32013-07-01 08:12:36 -04004566 if (*errp)
4567 goto discard_and_exit;
4568
4569 /* as we've just preallocated more space than
 4570		 * the user originally requested, we store the allocated
4571 * space in a special descriptor */
4572 if (ac->ac_status == AC_STATUS_FOUND &&
4573 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4574 *errp = ext4_mb_new_preallocation(ac);
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004575 if (*errp) {
Alexey Khoroshilov2c00ef32013-07-01 08:12:36 -04004576 discard_and_exit:
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004577 ext4_discard_allocated_blocks(ac);
Aditya Kali6c7a1202010-08-05 16:22:24 -04004578 goto errout;
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004579 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004580 }
Eric Sandeen256bdb42008-02-10 01:13:33 -05004581 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004582 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
Vegard Nossum554a5cc2016-07-14 23:02:47 -04004583 if (*errp) {
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05004584 ext4_discard_allocated_blocks(ac);
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004585 goto errout;
4586 } else {
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04004587 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4588 ar->len = ac->ac_b_ex.fe_len;
4589 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004590 } else {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004591 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05004592 if (freed)
4593 goto repeat;
4594 *errp = -ENOSPC;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004595 }
4596
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004597errout:
Aditya Kali6c7a1202010-08-05 16:22:24 -04004598 if (*errp) {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004599 ac->ac_b_ex.fe_len = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004600 ar->len = 0;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004601 ext4_mb_show_ac(ac);
Alex Tomasc9de5602008-01-29 00:19:52 -05004602 }
Eric Sandeen256bdb42008-02-10 01:13:33 -05004603 ext4_mb_release_context(ac);
Aditya Kali6c7a1202010-08-05 16:22:24 -04004604out:
4605 if (ac)
4606 kmem_cache_free(ext4_ac_cachep, ac);
Mingming Cao60e58e02009-01-22 18:13:05 +01004607 if (inquota && ar->len < inquota)
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004608 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004609 if (!ar->len) {
Theodore Ts'oe3cf5d52014-09-04 18:07:25 -04004610 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004611 /* release all the reserved blocks if non delalloc */
Theodore Ts'o57042652011-09-09 18:56:51 -04004612 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004613 reserv_clstrs);
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004614 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004615
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004616 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
Theodore Ts'oba80b102009-01-03 20:03:21 -05004617
Alex Tomasc9de5602008-01-29 00:19:52 -05004618 return block;
4619}
Alex Tomasc9de5602008-01-29 00:19:52 -05004620
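/*
 * Hypothetical usage sketch of the entry point above (handle/inode setup
 * assumed; field names are those of struct ext4_allocation_request):
 *
 *	struct ext4_allocation_request ar = {
 *		.inode   = inode,
 *		.logical = lblk,	// logical block being mapped
 *		.goal    = goal_pblk,	// preferred physical block
 *		.len     = 8,		// blocks wanted
 *		.flags   = EXT4_MB_HINT_DATA,
 *	};
 *	int err;
 *	ext4_fsblk_t pblk = ext4_mb_new_blocks(handle, &ar, &err);
 *
 * On success pblk is the first allocated block and ar.len is updated to
 * the length actually granted, which may be shorter than requested.
 */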
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004621/*
4622 * We can merge two free data extents only if the physical blocks
4623 * are contiguous, AND the extents were freed by the same transaction,
4624 * AND the blocks are associated with the same group.
4625 */
Daeho Jeonga0154342017-06-22 23:54:33 -04004626static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
4627 struct ext4_free_data *entry,
4628 struct ext4_free_data *new_entry,
4629 struct rb_root *entry_rb_root)
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004630{
Daeho Jeonga0154342017-06-22 23:54:33 -04004631 if ((entry->efd_tid != new_entry->efd_tid) ||
4632 (entry->efd_group != new_entry->efd_group))
4633 return;
4634 if (entry->efd_start_cluster + entry->efd_count ==
4635 new_entry->efd_start_cluster) {
4636 new_entry->efd_start_cluster = entry->efd_start_cluster;
4637 new_entry->efd_count += entry->efd_count;
4638 } else if (new_entry->efd_start_cluster + new_entry->efd_count ==
4639 entry->efd_start_cluster) {
4640 new_entry->efd_count += entry->efd_count;
4641 } else
4642 return;
4643 spin_lock(&sbi->s_md_lock);
4644 list_del(&entry->efd_list);
4645 spin_unlock(&sbi->s_md_lock);
4646 rb_erase(&entry->efd_node, entry_rb_root);
4647 kmem_cache_free(ext4_free_data_cachep, entry);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004648}
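/*
 * Merge example (illustrative): with an existing rb-tree entry covering
 * clusters [100, 108) and a new entry for [108, 112) freed by the same
 * transaction in the same group, the new entry absorbs the old one and
 * becomes [100, 112); the old node is unlinked from the tree and the
 * freed-data list. Extents freed by different transactions are never
 * merged, since each must stay pinned until its own commit finishes.
 */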
4649
Eric Sandeen4ddfef72008-04-29 08:11:12 -04004650static noinline_for_stack int
4651ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004652 struct ext4_free_data *new_entry)
Alex Tomasc9de5602008-01-29 00:19:52 -05004653{
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004654 ext4_group_t group = e4b->bd_group;
Theodore Ts'o84130192011-09-09 18:50:51 -04004655 ext4_grpblk_t cluster;
Theodore Ts'od08854f2016-06-26 18:24:01 -04004656 ext4_grpblk_t clusters = new_entry->efd_count;
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004657 struct ext4_free_data *entry;
Alex Tomasc9de5602008-01-29 00:19:52 -05004658 struct ext4_group_info *db = e4b->bd_info;
4659 struct super_block *sb = e4b->bd_sb;
4660 struct ext4_sb_info *sbi = EXT4_SB(sb);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004661 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4662 struct rb_node *parent = NULL, *new_node;
4663
Frank Mayhar03901312009-01-07 00:06:22 -05004664 BUG_ON(!ext4_handle_valid(handle));
Alex Tomasc9de5602008-01-29 00:19:52 -05004665 BUG_ON(e4b->bd_bitmap_page == NULL);
4666 BUG_ON(e4b->bd_buddy_page == NULL);
4667
Bobi Jam18aadd42012-02-20 17:53:02 -05004668 new_node = &new_entry->efd_node;
4669 cluster = new_entry->efd_start_cluster;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004670
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004671 if (!*n) {
 4672		/* first free block extent. We need to
 4673		 * protect buddy cache from being freed,
4674 * otherwise we'll refresh it from
4675 * on-disk bitmap and lose not-yet-available
4676 * blocks */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004677 get_page(e4b->bd_buddy_page);
4678 get_page(e4b->bd_bitmap_page);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004679 }
4680 while (*n) {
4681 parent = *n;
Bobi Jam18aadd42012-02-20 17:53:02 -05004682 entry = rb_entry(parent, struct ext4_free_data, efd_node);
4683 if (cluster < entry->efd_start_cluster)
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004684 n = &(*n)->rb_left;
Bobi Jam18aadd42012-02-20 17:53:02 -05004685 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004686 n = &(*n)->rb_right;
4687 else {
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004688 ext4_grp_locked_error(sb, group, 0,
Theodore Ts'o84130192011-09-09 18:50:51 -04004689 ext4_group_first_block_no(sb, group) +
4690 EXT4_C2B(sbi, cluster),
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004691 "Block already on to-be-freed list");
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004692 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004693 }
4694 }
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004695
4696 rb_link_node(new_node, parent, n);
4697 rb_insert_color(new_node, &db->bb_free_root);
4698
4699 /* Now try to see the extent can be merged to left and right */
4700 node = rb_prev(new_node);
4701 if (node) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004702 entry = rb_entry(node, struct ext4_free_data, efd_node);
Daeho Jeonga0154342017-06-22 23:54:33 -04004703 ext4_try_merge_freed_extent(sbi, entry, new_entry,
4704 &(db->bb_free_root));
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004705 }
4706
4707 node = rb_next(new_node);
4708 if (node) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004709 entry = rb_entry(node, struct ext4_free_data, efd_node);
Daeho Jeonga0154342017-06-22 23:54:33 -04004710 ext4_try_merge_freed_extent(sbi, entry, new_entry,
4711 &(db->bb_free_root));
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004712 }
Daeho Jeonga0154342017-06-22 23:54:33 -04004713
Theodore Ts'od08854f2016-06-26 18:24:01 -04004714 spin_lock(&sbi->s_md_lock);
Daeho Jeonga0154342017-06-22 23:54:33 -04004715 list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
Theodore Ts'od08854f2016-06-26 18:24:01 -04004716 sbi->s_mb_free_pending += clusters;
4717 spin_unlock(&sbi->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05004718 return 0;
4719}
4720
Theodore Ts'o44338712009-11-22 07:44:56 -05004721/**
4722 * ext4_free_blocks() -- Free given blocks and update quota
4723 * @handle: handle for this transaction
4724 * @inode: inode
4725 * @block: start physical block to free
 4726 * @count:		number of blocks to free
Yongqiang Yang5def1362011-06-05 23:26:40 -04004727 * @flags: flags used by ext4_free_blocks
Alex Tomasc9de5602008-01-29 00:19:52 -05004728 */
Theodore Ts'o44338712009-11-22 07:44:56 -05004729void ext4_free_blocks(handle_t *handle, struct inode *inode,
Theodore Ts'oe6362602009-11-23 07:17:05 -05004730 struct buffer_head *bh, ext4_fsblk_t block,
4731 unsigned long count, int flags)
Alex Tomasc9de5602008-01-29 00:19:52 -05004732{
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05004733 struct buffer_head *bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004734 struct super_block *sb = inode->i_sb;
Alex Tomasc9de5602008-01-29 00:19:52 -05004735 struct ext4_group_desc *gdp;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05004736 unsigned int overflow;
Alex Tomasc9de5602008-01-29 00:19:52 -05004737 ext4_grpblk_t bit;
4738 struct buffer_head *gd_bh;
4739 ext4_group_t block_group;
4740 struct ext4_sb_info *sbi;
4741 struct ext4_buddy e4b;
Theodore Ts'o84130192011-09-09 18:50:51 -04004742 unsigned int count_clusters;
Alex Tomasc9de5602008-01-29 00:19:52 -05004743 int err = 0;
4744 int ret;
4745
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04004746 might_sleep();
Theodore Ts'oe6362602009-11-23 07:17:05 -05004747 if (bh) {
4748 if (block)
4749 BUG_ON(block != bh->b_blocknr);
4750 else
4751 block = bh->b_blocknr;
4752 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004753
Alex Tomasc9de5602008-01-29 00:19:52 -05004754 sbi = EXT4_SB(sb);
Theodore Ts'o1f2acb62010-01-22 17:40:42 -05004755 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4756 !ext4_data_block_valid(sbi, block, count)) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05004757 ext4_error(sb, "Freeing blocks not in datazone - "
Theodore Ts'o1f2acb62010-01-22 17:40:42 -05004758 "block = %llu, count = %lu", block, count);
Alex Tomasc9de5602008-01-29 00:19:52 -05004759 goto error_return;
4760 }
4761
Theodore Ts'o0610b6e2009-06-15 03:45:05 -04004762 ext4_debug("freeing block %llu\n", block);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004763 trace_ext4_free_blocks(inode, block, count, flags);
4764
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004765 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4766 BUG_ON(count > 1);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004767
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004768 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4769 inode, bh, block);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004770 }
4771
Theodore Ts'o60e66792010-05-17 07:00:00 -04004772 /*
Theodore Ts'o84130192011-09-09 18:50:51 -04004773 * If the extent to be freed does not begin on a cluster
4774 * boundary, we need to deal with partial clusters at the
4775 * beginning and end of the extent. Normally we will free
4776 * blocks at the beginning or the end unless we are explicitly
4777 * requested to avoid doing so.
4778 */
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004779 overflow = EXT4_PBLK_COFF(sbi, block);
Theodore Ts'o84130192011-09-09 18:50:51 -04004780 if (overflow) {
4781 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4782 overflow = sbi->s_cluster_ratio - overflow;
4783 block += overflow;
4784 if (count > overflow)
4785 count -= overflow;
4786 else
4787 return;
4788 } else {
4789 block -= overflow;
4790 count += overflow;
4791 }
4792 }
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004793 overflow = EXT4_LBLK_COFF(sbi, count);
Theodore Ts'o84130192011-09-09 18:50:51 -04004794 if (overflow) {
4795 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4796 if (count > overflow)
4797 count -= overflow;
4798 else
4799 return;
4800 } else
4801 count += sbi->s_cluster_ratio - overflow;
4802 }
4803
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004804 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4805 int i;
Daeho Jeongf96c4502016-02-21 18:31:41 -05004806 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004807
4808 for (i = 0; i < count; i++) {
4809 cond_resched();
Daeho Jeongf96c4502016-02-21 18:31:41 -05004810 if (is_metadata)
4811 bh = sb_find_get_block(inode->i_sb, block + i);
4812 ext4_forget(handle, is_metadata, inode, bh, block + i);
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004813 }
4814 }
4815
Alex Tomasc9de5602008-01-29 00:19:52 -05004816do_more:
4817 overflow = 0;
4818 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4819
Darrick J. Wong163a2032013-08-28 17:35:51 -04004820 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
4821 ext4_get_group_info(sb, block_group))))
4822 return;
4823
Alex Tomasc9de5602008-01-29 00:19:52 -05004824 /*
4825 * Check to see if we are freeing blocks across a group
4826 * boundary.
4827 */
Theodore Ts'o84130192011-09-09 18:50:51 -04004828 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4829 overflow = EXT4_C2B(sbi, bit) + count -
4830 EXT4_BLOCKS_PER_GROUP(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004831 count -= overflow;
4832 }
Lukas Czerner810da242013-03-02 17:18:58 -05004833 count_clusters = EXT4_NUM_B2C(sbi, count);
Theodore Ts'o574ca172008-07-11 19:27:31 -04004834 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04004835 if (IS_ERR(bitmap_bh)) {
4836 err = PTR_ERR(bitmap_bh);
4837 bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004838 goto error_return;
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004839 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004840 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004841 if (!gdp) {
4842 err = -EIO;
Alex Tomasc9de5602008-01-29 00:19:52 -05004843 goto error_return;
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004844 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004845
4846 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4847 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4848 in_range(block, ext4_inode_table(sb, gdp),
Theodore Ts'o84130192011-09-09 18:50:51 -04004849 EXT4_SB(sb)->s_itb_per_group) ||
Alex Tomasc9de5602008-01-29 00:19:52 -05004850 in_range(block + count - 1, ext4_inode_table(sb, gdp),
Theodore Ts'o84130192011-09-09 18:50:51 -04004851 EXT4_SB(sb)->s_itb_per_group)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004852
Eric Sandeen12062dd2010-02-15 14:19:27 -05004853 ext4_error(sb, "Freeing blocks in system zone - "
Theodore Ts'o0610b6e2009-06-15 03:45:05 -04004854 "Block = %llu, count = %lu", block, count);
Aneesh Kumar K.V519deca02008-05-15 14:43:20 -04004855 /* err = 0. ext4_std_error should be a no op */
4856 goto error_return;
Alex Tomasc9de5602008-01-29 00:19:52 -05004857 }
4858
4859 BUFFER_TRACE(bitmap_bh, "getting write access");
4860 err = ext4_journal_get_write_access(handle, bitmap_bh);
4861 if (err)
4862 goto error_return;
4863
4864 /*
4865 * We are about to modify some metadata. Call the journal APIs
4866 * to unshare ->b_data if a currently-committing transaction is
4867 * using it
4868 */
4869 BUFFER_TRACE(gd_bh, "get_write_access");
4870 err = ext4_journal_get_write_access(handle, gd_bh);
4871 if (err)
4872 goto error_return;
Alex Tomasc9de5602008-01-29 00:19:52 -05004873#ifdef AGGRESSIVE_CHECK
4874 {
4875 int i;
Theodore Ts'o84130192011-09-09 18:50:51 -04004876 for (i = 0; i < count_clusters; i++)
Alex Tomasc9de5602008-01-29 00:19:52 -05004877 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4878 }
4879#endif
Theodore Ts'o84130192011-09-09 18:50:51 -04004880 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
Alex Tomasc9de5602008-01-29 00:19:52 -05004881
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04004882 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
4883 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
4884 GFP_NOFS|__GFP_NOFAIL);
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05004885 if (err)
4886 goto error_return;
Theodore Ts'oe6362602009-11-23 07:17:05 -05004887
Daeho Jeongf96c4502016-02-21 18:31:41 -05004888 /*
4889 * We need to make sure we don't reuse the freed block until after the
4890 * transaction is committed. We make an exception if the inode is to be
4891 * written in writeback mode since writeback mode has weak data
4892 * consistency guarantees.
4893 */
4894 if (ext4_handle_valid(handle) &&
4895 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
4896 !ext4_should_writeback_data(inode))) {
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004897 struct ext4_free_data *new_entry;
4898 /*
Michal Hocko7444a072015-07-05 12:33:44 -04004899 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4900 * to fail.
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004901 */
Michal Hocko7444a072015-07-05 12:33:44 -04004902 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4903 GFP_NOFS|__GFP_NOFAIL);
Bobi Jam18aadd42012-02-20 17:53:02 -05004904 new_entry->efd_start_cluster = bit;
4905 new_entry->efd_group = block_group;
4906 new_entry->efd_count = count_clusters;
4907 new_entry->efd_tid = handle->h_transaction->t_tid;
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004908
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004909 ext4_lock_group(sb, block_group);
Theodore Ts'o84130192011-09-09 18:50:51 -04004910 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004911 ext4_mb_free_metadata(handle, &e4b, new_entry);
Alex Tomasc9de5602008-01-29 00:19:52 -05004912 } else {
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004913 /* need to update group_info->bb_free and bitmap
 4914		 * with group lock held. generate_buddy looks at
 4915		 * them with the group lock held
4916 */
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05004917 if (test_opt(sb, DISCARD)) {
Daeho Jeonga0154342017-06-22 23:54:33 -04004918 err = ext4_issue_discard(sb, block_group, bit, count,
4919 NULL);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05004920 if (err && err != -EOPNOTSUPP)
4921 ext4_msg(sb, KERN_WARNING, "discard request in"
4922 " group:%d block:%d count:%lu failed"
4923 " with %d", block_group, bit, count,
4924 err);
Lukas Czerner8f9ff182013-10-30 11:10:52 -04004925 } else
4926 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05004927
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004928 ext4_lock_group(sb, block_group);
Theodore Ts'o84130192011-09-09 18:50:51 -04004929 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4930 mb_free_blocks(inode, &e4b, bit, count_clusters);
Alex Tomasc9de5602008-01-29 00:19:52 -05004931 }
4932
Theodore Ts'o021b65b2011-09-09 19:08:51 -04004933 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4934 ext4_free_group_clusters_set(sb, gdp, ret);
Tao Ma79f1ba42012-10-22 00:34:32 -04004935 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04004936 ext4_group_desc_csum_set(sb, block_group, gdp);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004937 ext4_unlock_group(sb, block_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05004938
Jose R. Santos772cb7c2008-07-11 19:27:31 -04004939 if (sbi->s_log_groups_per_flex) {
4940 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04004941 atomic64_add(count_clusters,
4942 &sbi->s_flex_groups[flex_group].free_clusters);
Jose R. Santos772cb7c2008-07-11 19:27:31 -04004943 }
4944
Theodore Ts'o71d4f7d2014-07-15 06:02:38 -04004945 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
Aditya Kali7b415bf2011-09-09 19:04:51 -04004946 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
Jan Kara7d734532013-08-17 09:36:54 -04004947 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4948
4949 ext4_mb_unload_buddy(&e4b);
Aditya Kali7b415bf2011-09-09 19:04:51 -04004950
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004951 /* We dirtied the bitmap block */
4952 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4953 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4954
Alex Tomasc9de5602008-01-29 00:19:52 -05004955 /* And the group descriptor block */
4956 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
Frank Mayhar03901312009-01-07 00:06:22 -05004957 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05004958 if (!err)
4959 err = ret;
4960
4961 if (overflow && !err) {
4962 block += count;
4963 count = overflow;
4964 put_bh(bitmap_bh);
4965 goto do_more;
4966 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004967error_return:
4968 brelse(bitmap_bh);
4969 ext4_std_error(sb, err);
Alex Tomasc9de5602008-01-29 00:19:52 -05004970 return;
4971}
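/*
 * Worked example of the partial-cluster handling above (assumed geometry,
 * for illustration): with s_cluster_ratio = 16, block = 100, count = 40
 * and no EXT4_FREE_BLOCKS_NOFREE_* flags:
 *
 *	EXT4_PBLK_COFF(sbi, 100) = 100 % 16 = 4   =>  block = 96, count = 44
 *	EXT4_LBLK_COFF(sbi, 44)  =  44 % 16 = 12  =>  count = 44 + (16 - 12) = 48
 *
 * The range is widened to the enclosing cluster boundaries [96, 144),
 * releasing exactly 48 / 16 = 3 whole clusters to the allocator.
 */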
Lukas Czerner7360d172010-10-27 21:30:12 -04004972
4973/**
Yongqiang Yang05291552011-07-26 21:43:56 -04004974 * ext4_group_add_blocks() -- Add given blocks to an existing group
Amir Goldstein2846e822011-05-09 10:46:41 -04004975 * @handle: handle to this transaction
4976 * @sb: super block
Anatol Pomozov4907cb72012-09-01 10:31:09 -07004977 * @block: start physical block to add to the block group
Amir Goldstein2846e822011-05-09 10:46:41 -04004978 * @count:	number of blocks to add
4979 *
Amir Goldsteine73a3472011-05-09 21:40:01 -04004980 * This marks the blocks as free in the bitmap and buddy.
Amir Goldstein2846e822011-05-09 10:46:41 -04004981 */
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004982int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
Amir Goldstein2846e822011-05-09 10:46:41 -04004983 ext4_fsblk_t block, unsigned long count)
4984{
4985 struct buffer_head *bitmap_bh = NULL;
4986 struct buffer_head *gd_bh;
4987 ext4_group_t block_group;
4988 ext4_grpblk_t bit;
4989 unsigned int i;
4990 struct ext4_group_desc *desc;
4991 struct ext4_sb_info *sbi = EXT4_SB(sb);
Amir Goldsteine73a3472011-05-09 21:40:01 -04004992 struct ext4_buddy e4b;
Amir Goldstein2846e822011-05-09 10:46:41 -04004993 int err = 0, ret, blk_free_count;
4994 ext4_grpblk_t blocks_freed;
Amir Goldstein2846e822011-05-09 10:46:41 -04004995
4996 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4997
Yongqiang Yang4740b832011-07-26 21:51:08 -04004998 if (count == 0)
4999 return 0;
5000
Amir Goldstein2846e822011-05-09 10:46:41 -04005001 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
Amir Goldstein2846e822011-05-09 10:46:41 -04005002 /*
5003 * Check to see if we are freeing blocks across a group
5004 * boundary.
5005 */
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005006 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
Jakub Wilk8d2ae1c2016-04-27 01:11:21 -04005007		ext4_warning(sb, "too many blocks added to group %u",
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005008 block_group);
5009 err = -EINVAL;
Amir Goldstein2846e822011-05-09 10:46:41 -04005010 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005011 }
Theodore Ts'o2cd05cc2011-05-09 10:58:45 -04005012
Amir Goldstein2846e822011-05-09 10:46:41 -04005013 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04005014 if (IS_ERR(bitmap_bh)) {
5015 err = PTR_ERR(bitmap_bh);
5016 bitmap_bh = NULL;
Amir Goldstein2846e822011-05-09 10:46:41 -04005017 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005018 }
5019
Amir Goldstein2846e822011-05-09 10:46:41 -04005020 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005021 if (!desc) {
5022 err = -EIO;
Amir Goldstein2846e822011-05-09 10:46:41 -04005023 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005024 }
Amir Goldstein2846e822011-05-09 10:46:41 -04005025
5026 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
5027 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
5028 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
5029 in_range(block + count - 1, ext4_inode_table(sb, desc),
5030 sbi->s_itb_per_group)) {
5031 ext4_error(sb, "Adding blocks in system zones - "
5032 "Block = %llu, count = %lu",
5033 block, count);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005034 err = -EINVAL;
Amir Goldstein2846e822011-05-09 10:46:41 -04005035 goto error_return;
5036 }
5037
Theodore Ts'o2cd05cc2011-05-09 10:58:45 -04005038 BUFFER_TRACE(bitmap_bh, "getting write access");
5039 err = ext4_journal_get_write_access(handle, bitmap_bh);
Amir Goldstein2846e822011-05-09 10:46:41 -04005040 if (err)
5041 goto error_return;
5042
5043 /*
5044 * We are about to modify some metadata. Call the journal APIs
5045 * to unshare ->b_data if a currently-committing transaction is
5046 * using it
5047 */
5048 BUFFER_TRACE(gd_bh, "get_write_access");
5049 err = ext4_journal_get_write_access(handle, gd_bh);
5050 if (err)
5051 goto error_return;
Amir Goldsteine73a3472011-05-09 21:40:01 -04005052
Amir Goldstein2846e822011-05-09 10:46:41 -04005053 for (i = 0, blocks_freed = 0; i < count; i++) {
5054 BUFFER_TRACE(bitmap_bh, "clear bit");
Amir Goldsteine73a3472011-05-09 21:40:01 -04005055 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
Amir Goldstein2846e822011-05-09 10:46:41 -04005056 ext4_error(sb, "bit already cleared for block %llu",
5057 (ext4_fsblk_t)(block + i));
5058 BUFFER_TRACE(bitmap_bh, "bit already cleared");
5059 } else {
5060 blocks_freed++;
5061 }
5062 }
Amir Goldsteine73a3472011-05-09 21:40:01 -04005063
5064 err = ext4_mb_load_buddy(sb, block_group, &e4b);
5065 if (err)
5066 goto error_return;
5067
5068 /*
5069 * need to update group_info->bb_free and bitmap
 5070	 * with group lock held. generate_buddy looks at
 5071	 * them with the group lock held
5072 */
Amir Goldstein2846e822011-05-09 10:46:41 -04005073 ext4_lock_group(sb, block_group);
Amir Goldsteine73a3472011-05-09 21:40:01 -04005074 mb_clear_bits(bitmap_bh->b_data, bit, count);
5075 mb_free_blocks(NULL, &e4b, bit, count);
Theodore Ts'o021b65b2011-09-09 19:08:51 -04005076 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
5077 ext4_free_group_clusters_set(sb, desc, blk_free_count);
Tao Ma79f1ba42012-10-22 00:34:32 -04005078 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04005079 ext4_group_desc_csum_set(sb, block_group, desc);
Amir Goldstein2846e822011-05-09 10:46:41 -04005080 ext4_unlock_group(sb, block_group);
Theodore Ts'o57042652011-09-09 18:56:51 -04005081 percpu_counter_add(&sbi->s_freeclusters_counter,
Lukas Czerner810da242013-03-02 17:18:58 -05005082 EXT4_NUM_B2C(sbi, blocks_freed));
Amir Goldstein2846e822011-05-09 10:46:41 -04005083
5084 if (sbi->s_log_groups_per_flex) {
5085 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04005086 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5087 &sbi->s_flex_groups[flex_group].free_clusters);
Amir Goldstein2846e822011-05-09 10:46:41 -04005088 }
Amir Goldsteine73a3472011-05-09 21:40:01 -04005089
5090 ext4_mb_unload_buddy(&e4b);
Amir Goldstein2846e822011-05-09 10:46:41 -04005091
5092 /* We dirtied the bitmap block */
5093 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5094 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5095
5096 /* And the group descriptor block */
5097 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5098 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5099 if (!err)
5100 err = ret;
5101
5102error_return:
5103 brelse(bitmap_bh);
5104 ext4_std_error(sb, err);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005105 return err;
Amir Goldstein2846e822011-05-09 10:46:41 -04005106}
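/*
 * Usage note (an assumption based on the contract above, not shown in
 * this file): the online-resize path uses this helper to hand freshly
 * added blocks over to mballoc, e.g.
 *
 *	err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
 *
 * where the whole range must stay inside a single block group; a range
 * crossing a group boundary fails with -EINVAL per the check above.
 */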
5107
/**
 * ext4_trim_extent -- function to TRIM a single free extent in the group
 * @sb:		super block for the file system
 * @start:	starting block of the free extent in the alloc. group
 * @count:	number of blocks to TRIM
 * @group:	alloc. group we are working with
 * @e4b:	ext4 buddy for the group
 *
 * Trim "count" blocks starting at "start" in the "group". To assure that no
 * one will allocate those blocks, mark them as used in the buddy bitmap.
 * This must be called under the group lock.
 */
static int ext4_trim_extent(struct super_block *sb, int start, int count,
			    ext4_group_t group, struct ext4_buddy *e4b)
__releases(bitlock)
__acquires(bitlock)
{
	struct ext4_free_extent ex;
	int ret = 0;

	trace_ext4_trim_extent(sb, group, start, count);

	assert_spin_locked(ext4_group_lock_ptr(sb, group));

	ex.fe_start = start;
	ex.fe_group = group;
	ex.fe_len = count;

	/*
	 * Mark blocks used, so no one can reuse them while
	 * being trimmed.
	 */
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, group);
	ret = ext4_issue_discard(sb, group, start, count, NULL);
	ext4_lock_group(sb, group);
	mb_free_blocks(NULL, e4b, start, ex.fe_len);
	return ret;
}

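/*
 * Note that ext4_trim_extent() drops the group lock around the discard:
 * the extent is marked used in the buddy bitmap first, so no allocator
 * can hand out those blocks while the (potentially slow) discard is in
 * flight, and mb_free_blocks() returns them once it completes.
 */
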
/**
 * ext4_trim_all_free -- function to trim all free space in alloc. group
 * @sb:		super block for file system
 * @group:	group to be trimmed
 * @start:	first group block to examine
 * @max:	last group block to examine
 * @minblocks:	minimum extent block count
 *
 * ext4_trim_all_free walks through group's block bitmap searching for free
 * extents. When a free extent is found, it is marked as used in the group
 * buddy bitmap, a TRIM command is issued on the extent, and the extent is
 * freed again in the group buddy bitmap. This is repeated until the whole
 * group has been scanned.
 */
static ext4_grpblk_t
ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
		   ext4_grpblk_t start, ext4_grpblk_t max,
		   ext4_grpblk_t minblocks)
{
	void *bitmap;
	ext4_grpblk_t next, count = 0, free_count = 0;
	struct ext4_buddy e4b;
	int ret = 0;

	trace_ext4_trim_all_free(sb, group, start, max);

	ret = ext4_mb_load_buddy(sb, group, &e4b);
	if (ret) {
		ext4_warning(sb, "Error %d loading buddy information for %u",
			     ret, group);
		return ret;
	}
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);
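	/*
	 * Skip the group if it was already trimmed with an equal or
	 * smaller minimum extent length; the WAS_TRIMMED flag is
	 * cleared again once blocks in the group are freed.
	 */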
	if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
	    minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
		goto out;

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;

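	/*
	 * Scan for runs of zero bits (free clusters). For example, if
	 * bits 2..3 are clear and bit 4 is set, mb_find_next_zero_bit()
	 * returns 2 and mb_find_next_bit() returns 4, i.e. a candidate
	 * extent of two clusters at offset 2.
	 */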
	while (start <= max) {
		start = mb_find_next_zero_bit(bitmap, max + 1, start);
		if (start > max)
			break;
		next = mb_find_next_bit(bitmap, max + 1, start);

		if ((next - start) >= minblocks) {
			ret = ext4_trim_extent(sb, start,
					       next - start, group, &e4b);
			if (ret && ret != -EOPNOTSUPP)
				break;
			ret = 0;
			count += next - start;
		}
		free_count += next - start;
		start = next + 1;

		if (fatal_signal_pending(current)) {
			count = -ERESTARTSYS;
			break;
		}

		if (need_resched()) {
			ext4_unlock_group(sb, group);
			cond_resched();
			ext4_lock_group(sb, group);
		}

		if ((e4b.bd_info->bb_free - free_count) < minblocks)
			break;
	}

	if (!ret) {
		ret = count;
		EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
	}
out:
	ext4_unlock_group(sb, group);
	ext4_mb_unload_buddy(&e4b);

	ext4_debug("trimmed %d blocks in the group %d\n",
		   count, group);

	return ret;
}

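/*
 * For illustration only (not part of this file): ext4_trim_fs() below is
 * reached from userspace via the FITRIM ioctl, roughly as fstrim(8) does
 * it. A minimal sketch, assuming /mnt is an ext4 mount point:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,	// whole file system
 *		.minlen	= 0,		// let the fs pick the minimum
 *	};
 *	int fd = open("/mnt", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		// on return, range.len holds the number of bytes trimmed
 *		printf("%llu bytes trimmed\n",
 *		       (unsigned long long)range.len);
 *
 * This needs <fcntl.h>, <sys/ioctl.h> and <linux/fs.h> for FITRIM and
 * struct fstrim_range.
 */
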
/**
 * ext4_trim_fs() -- trim ioctl handler function
 * @sb:		superblock for filesystem
 * @range:	fstrim_range structure
 *
 * start:	first byte to trim
 * len:		number of bytes to trim from start
 * minlen:	minimum extent length in bytes
 * ext4_trim_fs goes through all allocation groups containing bytes from
 * start to start+len. For each such group the ext4_trim_all_free function
 * is invoked to trim all free space.
 */
int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	struct ext4_group_info *grp;
	ext4_group_t group, first_group, last_group;
	ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
	uint64_t start, end, minlen, trimmed = 0;
	ext4_fsblk_t first_data_blk =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
	ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
	int ret = 0;

	start = range->start >> sb->s_blocksize_bits;
	end = start + (range->len >> sb->s_blocksize_bits) - 1;
	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
			      range->minlen >> sb->s_blocksize_bits);
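	/*
	 * Worked example: with 4KiB blocks, a range->minlen of 1MiB is
	 * 1048576 >> 12 = 256 blocks, which EXT4_NUM_B2C() then rounds
	 * up to whole clusters on a bigalloc file system.
	 */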

	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
	    start >= max_blks ||
	    range->len < sb->s_blocksize)
		return -EINVAL;
	if (end >= max_blks)
		end = max_blks - 1;
	if (end <= first_data_blk)
		goto out;
	if (start < first_data_blk)
		start = first_data_blk;

	/* Determine first and last group to examine based on start and end */
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
				     &first_group, &first_cluster);
	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
				     &last_group, &last_cluster);

	/* end now represents the last cluster to discard in this group */
	end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster is
		 * always EXT4_CLUSTERS_PER_GROUP(sb) - 1, so we only need
		 * to change it for the last group; note that last_cluster
		 * was already computed above by
		 * ext4_get_group_no_and_offset().
		 */
		if (group == last_group)
			end = last_cluster;

		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

/* Iterate all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void *bitmap;
	ext4_grpblk_t next;
	struct ext4_buddy e4b;
	int error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

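		/*
		 * Drop the group lock while calling back: the formatter
		 * may sleep, and the extent boundaries for this iteration
		 * were already sampled under the lock above.
		 */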
		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);

	return error;
}
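
/*
 * A minimal sketch of an ext4_mballoc_query_range_fn callback (purely
 * illustrative; the name and the counting logic are hypothetical, only
 * the callback signature comes from mballoc.h):
 *
 *	static int count_free_extents(struct super_block *sb,
 *				      ext4_group_t group,
 *				      ext4_grpblk_t start,
 *				      ext4_grpblk_t len,
 *				      void *priv)
 *	{
 *		unsigned int *nr = priv;
 *
 *		(*nr)++;	// one callback per free extent
 *		return 0;	// nonzero would abort the walk
 *	}
 *
 * which could then be passed to ext4_mballoc_query_range() together
 * with a pointer to the counter as @priv.
 */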