/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

#ifdef CONFIG_EXT4_DEBUG
ushort ext4_mballoc_debug __read_mostly;

module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
#endif

/*
 * MUSTDO:
 *  - test ext4_ext_search_left() and ext4_ext_search_right()
 *  - search for metadata in a few groups
 *
 * TODO v4:
 *  - normalization should take into account whether the file is still open
 *  - discard preallocations if no free space left (policy?)
 *  - don't normalize tails
 *  - quota
 *  - reservation for superuser
 *
 * TODO v3:
 *  - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *  - track min/max extents in each group for better group selection
 *  - mb_mark_used() may allocate chunk right after splitting buddy
 *  - tree of groups sorted by number of free blocks
 *  - error handling
 */

/*
 * The allocation request involves a request for multiple blocks
 * near the specified goal block.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that
 * we don't modify the values associated with an inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and we
 * have the group allocation flag set, then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc, then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding the different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, which are stored in the
 * inode as:
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we actually need. The
 * extra blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of
 * heuristics based on file size. This can be found in
 * ext4_mb_normalize_request. If we are doing a group prealloc we try to
 * normalize the request to sbi->s_mb_group_prealloc. The default value of
 * s_mb_group_prealloc is dependent on the cluster size; for non-bigalloc
 * file systems, it is 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked against the criteria that decide
 * whether it can be used for allocation. ext4_mb_good_group explains how the
 * groups are checked.
 *
 * Both prealloc spaces get populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

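/*
 * Illustrative example (added; not part of the original design notes, and
 * the numbers below assume PAGE_SIZE = 4096 and blocksize = 1024, i.e.
 * blocks_per_page = 4 and groups_per_page = 2):
 *
 *   page 0: [ group 0 bitmap ][ group 0 buddy ][ group 1 bitmap ][ group 1 buddy ]
 *   page 1: [ group 2 bitmap ][ group 2 buddy ][ group 3 bitmap ][ group 3 buddy ]
 *
 * so for a given group, block number  group * 2  inside the buddy cache
 * inode holds the bitmap and  group * 2 + 1  holds the buddy data; the page
 * index is block / blocks_per_page and the offset within the page is
 * block % blocks_per_page, which is exactly the arithmetic used by
 * ext4_mb_get_buddy_page_lock() and ext4_mb_load_buddy_gfp() below.
 */
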
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to a
 *    permanent set of inodes: an inode can join and leave the group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count numbers of
 * blocks: how many blocks are marked used/free in the on-disk bitmap, buddy
 * and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actually
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given that some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if the buddy is referenced, it's already initialized
 *  2) while a block is used in the buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if the on-disk
 *     bitmap has a bit set and a PA claims the same block, it's OK. IOW, one
 *     can set a bit in the on-disk bitmap if the buddy has the same bit set
 *     and/or a PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for the PA are allocated in the buddy, the buddy must be
 *      referenced until the PA is linked to the allocation group to avoid
 *      concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either the on-disk bitmap or the PA has
 *      uptodate data; given (3) we care that the PA-=N operation doesn't
 *      interfere with init
 *    - discard inode PA
 *      the simplest way would be to have the buddy initialized by the discard
 *    - use locality group PA
 *      again, PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have the buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      the discard process must wait until the PA isn't used by another
 *      process
 *
 * now we're ready to draw a few consequences:
 *  - a PA is referenced, and while it is, no discard is possible
 *  - a PA is referenced until the corresponding blocks are marked in the
 *    on-disk bitmap
 *  - a PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of the on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA down to emptiness. no need to modify
 * the buddy in this case, but we should care about concurrent init.
 *
 */

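/*
 * Worked example of the bookkeeping above (added for illustration only):
 * suppose a group's on-disk bitmap has 100 blocks marked used and there is
 * one inode PA of 8 blocks; "init buddy" then marks
 * buddy = 100 + 8 = 108 blocks used.  "use inode PA" for 3 blocks sets 3
 * more bits in the on-disk bitmap (on-disk = 103) and leaves PA = 5, with
 * the buddy unchanged at 108, so buddy = on-disk + PA still holds.
 * Discarding the PA frees the 5 blocks that were preallocated but never
 * marked in the on-disk bitmap, so the buddy drops to 103 and PA = 0.
 */
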
 /*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size. There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
				struct ext4_journal_cb_entry *jce, int rc);

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

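/*
 * Added illustration (assumes a 64-bit machine): if a bitmap fragment starts
 * at address 0x...1003 and we want bit 5, mb_correct_addr_and_bit() rounds
 * the address down to the unsigned-long boundary 0x...1000 and the bit index
 * becomes 5 + 3 * 8 = 29, so the underlying aligned ext4_*_bit() call still
 * touches exactly the same bit in memory.
 */
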
static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

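/*
 * Added note: for a 4k block size (bd_blkbits = 12) the order-0 "buddy" is
 * just the cluster bitmap with 1 << (12 + 3) = 32768 bits, while each
 * higher-order bitmap lives in the buddy block at s_mb_offsets[order] with
 * s_mb_maxs[order] bits; every order covers chunks twice as large and so
 * needs half as many bits as the previous one.  (Those arrays are filled in
 * at mount time, outside this excerpt.)
 */
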
465#ifdef DOUBLE_CHECK
466static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
467 int first, int count)
468{
469 int i;
470 struct super_block *sb = e4b->bd_sb;
471
472 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
473 return;
Vincent Minetbc8e6742009-05-15 08:33:18 -0400474 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -0500475 for (i = 0; i < count; i++) {
476 if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
477 ext4_fsblk_t blocknr;
Akinobu Mita5661bd62010-03-03 23:53:39 -0500478
479 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
Theodore Ts'o53accfa2011-09-09 18:48:51 -0400480 blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
Aneesh Kumar K.V5d1b1b32009-01-05 22:19:52 -0500481 ext4_grp_locked_error(sb, e4b->bd_group,
Theodore Ts'oe29136f2010-06-29 12:54:28 -0400482 inode ? inode->i_ino : 0,
483 blocknr,
484 "freeing block already freed "
485 "(bit %u)",
486 first + i);
Alex Tomasc9de5602008-01-29 00:19:52 -0500487 }
488 mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
489 }
490}
491
492static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
493{
494 int i;
495
496 if (unlikely(e4b->bd_info->bb_bitmap == NULL))
497 return;
Vincent Minetbc8e6742009-05-15 08:33:18 -0400498 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -0500499 for (i = 0; i < count; i++) {
500 BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
501 mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
502 }
503}
504
505static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
506{
507 if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
508 unsigned char *b1, *b2;
509 int i;
510 b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
511 b2 = (unsigned char *) bitmap;
512 for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
513 if (b1[i] != b2[i]) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -0400514 ext4_msg(e4b->bd_sb, KERN_ERR,
515 "corruption in group %u "
516 "at byte %u(%u): %x in copy != %x "
517 "on disk/prealloc",
518 e4b->bd_group, i, i * 8, b1[i], b2[i]);
Alex Tomasc9de5602008-01-29 00:19:52 -0500519 BUG();
520 }
521 }
522 }
523}
524
525#else
526static inline void mb_free_blocks_double(struct inode *inode,
527 struct ext4_buddy *e4b, int first, int count)
528{
529 return;
530}
531static inline void mb_mark_used_double(struct ext4_buddy *e4b,
532 int first, int count)
533{
534 return;
535}
536static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
537{
538 return;
539}
540#endif
541
542#ifdef AGGRESSIVE_CHECK
543
544#define MB_CHECK_ASSERT(assert) \
545do { \
546 if (!(assert)) { \
547 printk(KERN_EMERG \
548 "Assertion failure in %s() at %s:%d: \"%s\"\n", \
549 function, file, line, # assert); \
550 BUG(); \
551 } \
552} while (0)
553
554static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
555 const char *function, int line)
556{
557 struct super_block *sb = e4b->bd_sb;
558 int order = e4b->bd_blkbits + 1;
559 int max;
560 int max2;
561 int i;
562 int j;
563 int k;
564 int count;
565 struct ext4_group_info *grp;
566 int fragments = 0;
567 int fstart;
568 struct list_head *cur;
569 void *buddy;
570 void *buddy2;
571
Alex Tomasc9de5602008-01-29 00:19:52 -0500572 {
573 static int mb_check_counter;
574 if (mb_check_counter++ % 100 != 0)
575 return 0;
576 }
577
578 while (order > 1) {
579 buddy = mb_find_buddy(e4b, order, &max);
580 MB_CHECK_ASSERT(buddy);
581 buddy2 = mb_find_buddy(e4b, order - 1, &max2);
582 MB_CHECK_ASSERT(buddy2);
583 MB_CHECK_ASSERT(buddy != buddy2);
584 MB_CHECK_ASSERT(max * 2 == max2);
585
586 count = 0;
587 for (i = 0; i < max; i++) {
588
589 if (mb_test_bit(i, buddy)) {
590 /* only single bit in buddy2 may be 1 */
591 if (!mb_test_bit(i << 1, buddy2)) {
592 MB_CHECK_ASSERT(
593 mb_test_bit((i<<1)+1, buddy2));
594 } else if (!mb_test_bit((i << 1) + 1, buddy2)) {
595 MB_CHECK_ASSERT(
596 mb_test_bit(i << 1, buddy2));
597 }
598 continue;
599 }
600
Robin Dong0a10da72011-10-26 08:48:54 -0400601 /* both bits in buddy2 must be 1 */
Alex Tomasc9de5602008-01-29 00:19:52 -0500602 MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
603 MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));
604
605 for (j = 0; j < (1 << order); j++) {
606 k = (i * (1 << order)) + j;
607 MB_CHECK_ASSERT(
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -0500608 !mb_test_bit(k, e4b->bd_bitmap));
Alex Tomasc9de5602008-01-29 00:19:52 -0500609 }
610 count++;
611 }
612 MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
613 order--;
614 }
615
616 fstart = -1;
617 buddy = mb_find_buddy(e4b, 0, &max);
618 for (i = 0; i < max; i++) {
619 if (!mb_test_bit(i, buddy)) {
620 MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
621 if (fstart == -1) {
622 fragments++;
623 fstart = i;
624 }
625 continue;
626 }
627 fstart = -1;
628 /* check used bits only */
629 for (j = 0; j < e4b->bd_blkbits + 1; j++) {
630 buddy2 = mb_find_buddy(e4b, j, &max2);
631 k = i >> j;
632 MB_CHECK_ASSERT(k < max2);
633 MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
634 }
635 }
636 MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
637 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
638
639 grp = ext4_get_group_info(sb, e4b->bd_group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500640 list_for_each(cur, &grp->bb_prealloc_list) {
641 ext4_group_t groupnr;
642 struct ext4_prealloc_space *pa;
Solofo Ramangalahy60bd63d2008-04-29 21:59:59 -0400643 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
644 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
Alex Tomasc9de5602008-01-29 00:19:52 -0500645 MB_CHECK_ASSERT(groupnr == e4b->bd_group);
Solofo Ramangalahy60bd63d2008-04-29 21:59:59 -0400646 for (i = 0; i < pa->pa_len; i++)
Alex Tomasc9de5602008-01-29 00:19:52 -0500647 MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
648 }
649 return 0;
650}
651#undef MB_CHECK_ASSERT
652#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
Harvey Harrison46e665e2008-04-17 10:38:59 -0400653 __FILE__, __func__, __LINE__)
Alex Tomasc9de5602008-01-29 00:19:52 -0500654#else
655#define mb_check_buddy(e4b)
656#endif
657
/*
 * Divide the blocks starting at @first with length @len into
 * smaller chunks whose sizes are powers of 2 blocks.
 * Clear the bits in the bitmap that the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
Alex Tomasc9de5602008-01-29 00:19:52 -0500664static void ext4_mb_mark_free_simple(struct super_block *sb,
Eric Sandeena36b4492009-08-25 22:36:45 -0400665 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
Alex Tomasc9de5602008-01-29 00:19:52 -0500666 struct ext4_group_info *grp)
667{
668 struct ext4_sb_info *sbi = EXT4_SB(sb);
Eric Sandeena36b4492009-08-25 22:36:45 -0400669 ext4_grpblk_t min;
670 ext4_grpblk_t max;
671 ext4_grpblk_t chunk;
Chandan Rajendrac3881ab2016-11-14 21:04:37 -0500672 unsigned int border;
Alex Tomasc9de5602008-01-29 00:19:52 -0500673
Theodore Ts'o7137d7a2011-09-09 18:38:51 -0400674 BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
Alex Tomasc9de5602008-01-29 00:19:52 -0500675
676 border = 2 << sb->s_blocksize_bits;
677
678 while (len > 0) {
679 /* find how many blocks can be covered since this position */
680 max = ffs(first | border) - 1;
681
682 /* find how many blocks of power 2 we need to mark */
683 min = fls(len) - 1;
684
685 if (max < min)
686 min = max;
687 chunk = 1 << min;
688
689 /* mark multiblock chunks only */
690 grp->bb_counters[min]++;
691 if (min > 0)
692 mb_clear_bit(first >> min,
693 buddy + sbi->s_mb_offsets[min]);
694
695 len -= chunk;
696 first += chunk;
697 }
698}
699
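/*
 * Worked example (added for illustration): a free extent of len = 13
 * clusters starting at first = 5 is split into power-of-2 chunks as
 * 1 + 2 + 8 + 2:
 *
 *   first = 5  (101b),  len = 13 -> chunk = 1 (bb_counters[0]++)
 *   first = 6  (110b),  len = 12 -> chunk = 2 (clear bit 3 in order-1 map)
 *   first = 8  (1000b), len = 10 -> chunk = 8 (clear bit 1 in order-3 map)
 *   first = 16,         len =  2 -> chunk = 2 (clear bit 8 in order-1 map)
 *
 * i.e. each chunk is the largest power of 2 that both fits in the remaining
 * length and is naturally aligned at the current offset.
 */
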
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -0400700/*
701 * Cache the order of the largest free extent we have available in this block
702 * group.
703 */
704static void
705mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
706{
707 int i;
708 int bits;
709
710 grp->bb_largest_free_order = -1; /* uninit */
711
712 bits = sb->s_blocksize_bits + 1;
713 for (i = bits; i >= 0; i--) {
714 if (grp->bb_counters[i] > 0) {
715 grp->bb_largest_free_order = i;
716 break;
717 }
718 }
719}
720
Eric Sandeen089ceec2009-07-05 22:17:31 -0400721static noinline_for_stack
722void ext4_mb_generate_buddy(struct super_block *sb,
Alex Tomasc9de5602008-01-29 00:19:52 -0500723 void *buddy, void *bitmap, ext4_group_t group)
724{
725 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
Namjae Jeone43bb4e2014-06-26 10:11:53 -0400726 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o7137d7a2011-09-09 18:38:51 -0400727 ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
Eric Sandeena36b4492009-08-25 22:36:45 -0400728 ext4_grpblk_t i = 0;
729 ext4_grpblk_t first;
730 ext4_grpblk_t len;
Alex Tomasc9de5602008-01-29 00:19:52 -0500731 unsigned free = 0;
732 unsigned fragments = 0;
733 unsigned long long period = get_cycles();
734
	/* initialize buddy from the bitmap which is an aggregation
	 * of the on-disk bitmap and preallocations */
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -0500737 i = mb_find_next_zero_bit(bitmap, max, 0);
Alex Tomasc9de5602008-01-29 00:19:52 -0500738 grp->bb_first_free = i;
739 while (i < max) {
740 fragments++;
741 first = i;
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -0500742 i = mb_find_next_bit(bitmap, max, i);
Alex Tomasc9de5602008-01-29 00:19:52 -0500743 len = i - first;
744 free += len;
745 if (len > 1)
746 ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
747 else
748 grp->bb_counters[0]++;
749 if (i < max)
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -0500750 i = mb_find_next_zero_bit(bitmap, max, i);
Alex Tomasc9de5602008-01-29 00:19:52 -0500751 }
752 grp->bb_fragments = fragments;
753
754 if (free != grp->bb_free) {
Theodore Ts'oe29136f2010-06-29 12:54:28 -0400755 ext4_grp_locked_error(sb, group, 0, 0,
Theodore Ts'o94d4c062014-07-05 19:15:50 -0400756 "block bitmap and bg descriptor "
757 "inconsistent: %u vs %u free clusters",
Theodore Ts'oe29136f2010-06-29 12:54:28 -0400758 free, grp->bb_free);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -0500759 /*
Darrick J. Wong163a2032013-08-28 17:35:51 -0400760 * If we intend to continue, we consider group descriptor
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -0500761 * corrupt and update bb_free using bitmap value
762 */
Alex Tomasc9de5602008-01-29 00:19:52 -0500763 grp->bb_free = free;
Namjae Jeone43bb4e2014-06-26 10:11:53 -0400764 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
765 percpu_counter_sub(&sbi->s_freeclusters_counter,
766 grp->bb_free);
Darrick J. Wong163a2032013-08-28 17:35:51 -0400767 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
Alex Tomasc9de5602008-01-29 00:19:52 -0500768 }
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -0400769 mb_set_largest_free_order(sb, grp);
Alex Tomasc9de5602008-01-29 00:19:52 -0500770
771 clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
772
773 period = get_cycles() - period;
774 spin_lock(&EXT4_SB(sb)->s_bal_lock);
775 EXT4_SB(sb)->s_mb_buddies_generated++;
776 EXT4_SB(sb)->s_mb_generation_time += period;
777 spin_unlock(&EXT4_SB(sb)->s_bal_lock);
778}
779
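/*
 * Added illustration: if the (bitmap + PA) aggregate passed in as "bitmap"
 * has free runs at clusters [5..17] and [30..31], the scan above sets
 * bb_first_free = 5, bb_fragments = 2 and free = 15, and
 * ext4_mb_mark_free_simple() records each run in the buddy bitmaps and
 * bb_counters[] (the 2-cluster run ends up in bb_counters[1]).
 */
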
Andrey Sidoroveabe0442013-04-09 12:22:29 -0400780static void mb_regenerate_buddy(struct ext4_buddy *e4b)
781{
782 int count;
783 int order = 1;
784 void *buddy;
785
786 while ((buddy = mb_find_buddy(e4b, order++, &count))) {
787 ext4_set_bits(buddy, 0, count);
788 }
789 e4b->bd_info->bb_fragments = 0;
790 memset(e4b->bd_info->bb_counters, 0,
791 sizeof(*e4b->bd_info->bb_counters) *
792 (e4b->bd_sb->s_blocksize_bits + 2));
793
794 ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy,
795 e4b->bd_bitmap, e4b->bd_group);
796}
797
/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and buddy information, which are
 * stored in the inode as
 *
 * { page }
 * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
 * So it can have information regarding groups_per_page which
 * is blocks_per_page/2
 *
 * Locking note: This routine takes the block group lock of all groups
 * for this page; do not hold this lock when calling this routine!
 */
817
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -0400818static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
Alex Tomasc9de5602008-01-29 00:19:52 -0500819{
Theodore Ts'o8df96752009-05-01 08:50:38 -0400820 ext4_group_t ngroups;
Alex Tomasc9de5602008-01-29 00:19:52 -0500821 int blocksize;
822 int blocks_per_page;
823 int groups_per_page;
824 int err = 0;
825 int i;
Theodore Ts'o813e5722012-02-20 17:52:46 -0500826 ext4_group_t first_group, group;
Alex Tomasc9de5602008-01-29 00:19:52 -0500827 int first_block;
828 struct super_block *sb;
829 struct buffer_head *bhs;
Darrick J. Wongfa77dcf2012-04-29 18:35:10 -0400830 struct buffer_head **bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -0500831 struct inode *inode;
832 char *data;
833 char *bitmap;
Amir Goldstein9b8b7d32011-05-09 21:49:42 -0400834 struct ext4_group_info *grinfo;
Alex Tomasc9de5602008-01-29 00:19:52 -0500835
Theodore Ts'o6ba495e2009-09-18 13:38:55 -0400836 mb_debug(1, "init page %lu\n", page->index);
Alex Tomasc9de5602008-01-29 00:19:52 -0500837
838 inode = page->mapping->host;
839 sb = inode->i_sb;
Theodore Ts'o8df96752009-05-01 08:50:38 -0400840 ngroups = ext4_get_groups_count(sb);
Fabian Frederick61604a22017-02-27 14:28:32 -0800841 blocksize = i_blocksize(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300842 blocks_per_page = PAGE_SIZE / blocksize;
Alex Tomasc9de5602008-01-29 00:19:52 -0500843
844 groups_per_page = blocks_per_page >> 1;
845 if (groups_per_page == 0)
846 groups_per_page = 1;
847
848 /* allocate buffer_heads to read bitmaps */
849 if (groups_per_page > 1) {
Alex Tomasc9de5602008-01-29 00:19:52 -0500850 i = sizeof(struct buffer_head *) * groups_per_page;
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -0400851 bh = kzalloc(i, gfp);
Theodore Ts'o813e5722012-02-20 17:52:46 -0500852 if (bh == NULL) {
853 err = -ENOMEM;
Alex Tomasc9de5602008-01-29 00:19:52 -0500854 goto out;
Theodore Ts'o813e5722012-02-20 17:52:46 -0500855 }
Alex Tomasc9de5602008-01-29 00:19:52 -0500856 } else
857 bh = &bhs;
858
859 first_group = page->index * blocks_per_page / 2;
860
861 /* read all groups the page covers into the cache */
Theodore Ts'o813e5722012-02-20 17:52:46 -0500862 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
863 if (group >= ngroups)
Alex Tomasc9de5602008-01-29 00:19:52 -0500864 break;
865
Theodore Ts'o813e5722012-02-20 17:52:46 -0500866 grinfo = ext4_get_group_info(sb, group);
Amir Goldstein9b8b7d32011-05-09 21:49:42 -0400867 /*
868 * If page is uptodate then we came here after online resize
869 * which added some new uninitialized group info structs, so
870 * we must skip all initialized uptodate buddies on the page,
871 * which may be currently in use by an allocating task.
872 */
873 if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
874 bh[i] = NULL;
875 continue;
876 }
Darrick J. Wong9008a582015-10-17 21:33:24 -0400877 bh[i] = ext4_read_block_bitmap_nowait(sb, group);
878 if (IS_ERR(bh[i])) {
879 err = PTR_ERR(bh[i]);
880 bh[i] = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -0500881 goto out;
Aneesh Kumar K.V2ccb5fb2009-01-05 21:49:55 -0500882 }
Theodore Ts'o813e5722012-02-20 17:52:46 -0500883 mb_debug(1, "read bitmap for group %u\n", group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500884 }
885
886 /* wait for I/O completion */
Theodore Ts'o813e5722012-02-20 17:52:46 -0500887 for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
Darrick J. Wong9008a582015-10-17 21:33:24 -0400888 int err2;
889
890 if (!bh[i])
891 continue;
892 err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
893 if (!err)
894 err = err2;
Theodore Ts'o813e5722012-02-20 17:52:46 -0500895 }
Alex Tomasc9de5602008-01-29 00:19:52 -0500896
897 first_block = page->index * blocks_per_page;
898 for (i = 0; i < blocks_per_page; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -0500899 group = (first_block + i) >> 1;
Theodore Ts'o8df96752009-05-01 08:50:38 -0400900 if (group >= ngroups)
Alex Tomasc9de5602008-01-29 00:19:52 -0500901 break;
902
Amir Goldstein9b8b7d32011-05-09 21:49:42 -0400903 if (!bh[group - first_group])
904 /* skip initialized uptodate buddy */
905 continue;
906
Lukas Czernerbbdc3222015-06-08 11:38:37 -0400907 if (!buffer_verified(bh[group - first_group]))
908 /* Skip faulty bitmaps */
909 continue;
910 err = 0;
911
		/*
		 * data carries information regarding this
		 * particular group in the format specified
		 * above
		 *
		 */
918 data = page_address(page) + (i * blocksize);
919 bitmap = bh[group - first_group]->b_data;
920
921 /*
922 * We place the buddy block and bitmap block
923 * close together
924 */
925 if ((first_block + i) & 1) {
926 /* this is block of buddy */
927 BUG_ON(incore == NULL);
Theodore Ts'o6ba495e2009-09-18 13:38:55 -0400928 mb_debug(1, "put buddy for group %u in page %lu/%x\n",
Alex Tomasc9de5602008-01-29 00:19:52 -0500929 group, page->index, i * blocksize);
Theodore Ts'of3073332010-05-17 03:00:00 -0400930 trace_ext4_mb_buddy_bitmap_load(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500931 grinfo = ext4_get_group_info(sb, group);
932 grinfo->bb_fragments = 0;
933 memset(grinfo->bb_counters, 0,
Eric Sandeen19278052009-08-25 22:36:25 -0400934 sizeof(*grinfo->bb_counters) *
935 (sb->s_blocksize_bits+2));
Alex Tomasc9de5602008-01-29 00:19:52 -0500936 /*
937 * incore got set to the group block bitmap below
938 */
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -0500939 ext4_lock_group(sb, group);
Amir Goldstein9b8b7d32011-05-09 21:49:42 -0400940 /* init the buddy */
941 memset(data, 0xff, blocksize);
Alex Tomasc9de5602008-01-29 00:19:52 -0500942 ext4_mb_generate_buddy(sb, data, incore, group);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -0500943 ext4_unlock_group(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500944 incore = NULL;
945 } else {
946 /* this is block of bitmap */
947 BUG_ON(incore != NULL);
Theodore Ts'o6ba495e2009-09-18 13:38:55 -0400948 mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
Alex Tomasc9de5602008-01-29 00:19:52 -0500949 group, page->index, i * blocksize);
Theodore Ts'of3073332010-05-17 03:00:00 -0400950 trace_ext4_mb_bitmap_load(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500951
952 /* see comments in ext4_mb_put_pa() */
953 ext4_lock_group(sb, group);
954 memcpy(data, bitmap, blocksize);
955
956 /* mark all preallocated blks used in in-core bitmap */
957 ext4_mb_generate_from_pa(sb, data, group);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -0500958 ext4_mb_generate_from_freelist(sb, data, group);
Alex Tomasc9de5602008-01-29 00:19:52 -0500959 ext4_unlock_group(sb, group);
960
961 /* set incore so that the buddy information can be
962 * generated using this
963 */
964 incore = data;
965 }
966 }
967 SetPageUptodate(page);
968
969out:
970 if (bh) {
Amir Goldstein9b8b7d32011-05-09 21:49:42 -0400971 for (i = 0; i < groups_per_page; i++)
Alex Tomasc9de5602008-01-29 00:19:52 -0500972 brelse(bh[i]);
973 if (bh != &bhs)
974 kfree(bh);
975 }
976 return err;
977}
978
/*
 * Lock the buddy and bitmap pages. This makes sure other parallel init_group
 * on the same buddy page doesn't happen while holding the buddy page lock.
 * Return locked buddy and bitmap pages on the e4b struct. If buddy and bitmap
 * are on the same page, e4b->bd_buddy_page is NULL and the return value is 0.
 */
Amir Goldstein2de88072011-05-09 21:48:13 -0400985static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -0400986 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
Eric Sandeeneee4adc2010-10-27 21:30:15 -0400987{
Amir Goldstein2de88072011-05-09 21:48:13 -0400988 struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
989 int block, pnum, poff;
Eric Sandeeneee4adc2010-10-27 21:30:15 -0400990 int blocks_per_page;
Amir Goldstein2de88072011-05-09 21:48:13 -0400991 struct page *page;
992
993 e4b->bd_buddy_page = NULL;
994 e4b->bd_bitmap_page = NULL;
Eric Sandeeneee4adc2010-10-27 21:30:15 -0400995
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300996 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
Eric Sandeeneee4adc2010-10-27 21:30:15 -0400997 /*
998 * the buddy cache inode stores the block bitmap
999 * and buddy information in consecutive blocks.
1000 * So for each group we need two blocks.
1001 */
1002 block = group * 2;
1003 pnum = block / blocks_per_page;
Amir Goldstein2de88072011-05-09 21:48:13 -04001004 poff = block % blocks_per_page;
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001005 page = find_or_create_page(inode->i_mapping, pnum, gfp);
Amir Goldstein2de88072011-05-09 21:48:13 -04001006 if (!page)
Younger Liuc57ab392014-04-10 23:03:43 -04001007 return -ENOMEM;
Amir Goldstein2de88072011-05-09 21:48:13 -04001008 BUG_ON(page->mapping != inode->i_mapping);
1009 e4b->bd_bitmap_page = page;
1010 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001011
Amir Goldstein2de88072011-05-09 21:48:13 -04001012 if (blocks_per_page >= 2) {
1013 /* buddy and bitmap are on the same page */
1014 return 0;
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001015 }
Amir Goldstein2de88072011-05-09 21:48:13 -04001016
1017 block++;
1018 pnum = block / blocks_per_page;
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001019 page = find_or_create_page(inode->i_mapping, pnum, gfp);
Amir Goldstein2de88072011-05-09 21:48:13 -04001020 if (!page)
Younger Liuc57ab392014-04-10 23:03:43 -04001021 return -ENOMEM;
Amir Goldstein2de88072011-05-09 21:48:13 -04001022 BUG_ON(page->mapping != inode->i_mapping);
1023 e4b->bd_buddy_page = page;
1024 return 0;
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001025}
1026
Amir Goldstein2de88072011-05-09 21:48:13 -04001027static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001028{
Amir Goldstein2de88072011-05-09 21:48:13 -04001029 if (e4b->bd_bitmap_page) {
1030 unlock_page(e4b->bd_bitmap_page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001031 put_page(e4b->bd_bitmap_page);
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001032 }
Amir Goldstein2de88072011-05-09 21:48:13 -04001033 if (e4b->bd_buddy_page) {
1034 unlock_page(e4b->bd_buddy_page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001035 put_page(e4b->bd_buddy_page);
Amir Goldstein2de88072011-05-09 21:48:13 -04001036 }
Eric Sandeeneee4adc2010-10-27 21:30:15 -04001037}
1038
1039/*
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001040 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1041 * block group lock of all groups for this page; do not hold the BG lock when
1042 * calling this routine!
1043 */
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001044static noinline_for_stack
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001045int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001046{
1047
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001048 struct ext4_group_info *this_grp;
Amir Goldstein2de88072011-05-09 21:48:13 -04001049 struct ext4_buddy e4b;
1050 struct page *page;
1051 int ret = 0;
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001052
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04001053 might_sleep();
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001054 mb_debug(1, "init group %u\n", group);
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001055 this_grp = ext4_get_group_info(sb, group);
	/*
	 * This ensures that we don't reinit the buddy cache
	 * page which maps to the group from which we are already
	 * allocating. If we are looking at the buddy cache we would
	 * have taken a reference using ext4_mb_load_buddy and that
	 * would have pinned the buddy page to the page cache.
	 * The call to ext4_mb_get_buddy_page_lock will mark the
	 * page accessed.
	 */
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001065 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
Amir Goldstein2de88072011-05-09 21:48:13 -04001066 if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001067 /*
1068 * somebody initialized the group
1069 * return without doing anything
1070 */
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001071 goto err;
1072 }
Amir Goldstein2de88072011-05-09 21:48:13 -04001073
1074 page = e4b.bd_bitmap_page;
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001075 ret = ext4_mb_init_cache(page, NULL, gfp);
Amir Goldstein2de88072011-05-09 21:48:13 -04001076 if (ret)
1077 goto err;
1078 if (!PageUptodate(page)) {
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001079 ret = -EIO;
1080 goto err;
1081 }
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001082
Amir Goldstein2de88072011-05-09 21:48:13 -04001083 if (e4b.bd_buddy_page == NULL) {
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001084 /*
1085 * If both the bitmap and buddy are in
1086 * the same page we don't need to force
1087 * init the buddy
1088 */
Amir Goldstein2de88072011-05-09 21:48:13 -04001089 ret = 0;
1090 goto err;
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001091 }
Amir Goldstein2de88072011-05-09 21:48:13 -04001092 /* init buddy cache */
1093 page = e4b.bd_buddy_page;
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001094 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
Amir Goldstein2de88072011-05-09 21:48:13 -04001095 if (ret)
1096 goto err;
1097 if (!PageUptodate(page)) {
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001098 ret = -EIO;
1099 goto err;
1100 }
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001101err:
Amir Goldstein2de88072011-05-09 21:48:13 -04001102 ext4_mb_put_buddy_page_lock(&e4b);
Aneesh Kumar K.Vb6a758e2009-09-09 23:47:46 -04001103 return ret;
1104}
1105
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001106/*
1107 * Locking note: This routine calls ext4_mb_init_cache(), which takes the
1108 * block group lock of all groups for this page; do not hold the BG lock when
1109 * calling this routine!
1110 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04001111static noinline_for_stack int
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001112ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
1113 struct ext4_buddy *e4b, gfp_t gfp)
Alex Tomasc9de5602008-01-29 00:19:52 -05001114{
Alex Tomasc9de5602008-01-29 00:19:52 -05001115 int blocks_per_page;
1116 int block;
1117 int pnum;
1118 int poff;
1119 struct page *page;
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001120 int ret;
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05001121 struct ext4_group_info *grp;
1122 struct ext4_sb_info *sbi = EXT4_SB(sb);
1123 struct inode *inode = sbi->s_buddy_cache;
Alex Tomasc9de5602008-01-29 00:19:52 -05001124
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04001125 might_sleep();
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04001126 mb_debug(1, "load group %u\n", group);
Alex Tomasc9de5602008-01-29 00:19:52 -05001127
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001128 blocks_per_page = PAGE_SIZE / sb->s_blocksize;
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05001129 grp = ext4_get_group_info(sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05001130
1131 e4b->bd_blkbits = sb->s_blocksize_bits;
Tao Ma529da702011-07-23 16:07:26 -04001132 e4b->bd_info = grp;
Alex Tomasc9de5602008-01-29 00:19:52 -05001133 e4b->bd_sb = sb;
1134 e4b->bd_group = group;
1135 e4b->bd_buddy_page = NULL;
1136 e4b->bd_bitmap_page = NULL;
1137
Aneesh Kumar K.Vf41c0752009-09-09 23:34:50 -04001138 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
Aneesh Kumar K.Vf41c0752009-09-09 23:34:50 -04001139 /*
1140 * we need full data about the group
1141 * to make a good selection
1142 */
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001143 ret = ext4_mb_init_group(sb, group, gfp);
Aneesh Kumar K.Vf41c0752009-09-09 23:34:50 -04001144 if (ret)
1145 return ret;
Aneesh Kumar K.Vf41c0752009-09-09 23:34:50 -04001146 }
1147
Alex Tomasc9de5602008-01-29 00:19:52 -05001148 /*
1149 * the buddy cache inode stores the block bitmap
1150 * and buddy information in consecutive blocks.
1151 * So for each group we need two blocks.
1152 */
1153 block = group * 2;
1154 pnum = block / blocks_per_page;
1155 poff = block % blocks_per_page;
1156
	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
Mel Gorman2457aec2014-06-04 16:10:31 -07001159 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
Alex Tomasc9de5602008-01-29 00:19:52 -05001160 if (page == NULL || !PageUptodate(page)) {
1161 if (page)
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05001162 /*
1163 * drop the page reference and try
1164 * to get the page with lock. If we
1165 * are not uptodate that implies
1166 * somebody just created the page but
1167 * is yet to initialize the same. So
1168 * wait for it to initialize.
1169 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001170 put_page(page);
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001171 page = find_or_create_page(inode->i_mapping, pnum, gfp);
Alex Tomasc9de5602008-01-29 00:19:52 -05001172 if (page) {
1173 BUG_ON(page->mapping != inode->i_mapping);
1174 if (!PageUptodate(page)) {
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001175 ret = ext4_mb_init_cache(page, NULL, gfp);
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001176 if (ret) {
1177 unlock_page(page);
1178 goto err;
1179 }
Alex Tomasc9de5602008-01-29 00:19:52 -05001180 mb_cmp_bitmaps(e4b, page_address(page) +
1181 (poff * sb->s_blocksize));
1182 }
1183 unlock_page(page);
1184 }
1185 }
Younger Liuc57ab392014-04-10 23:03:43 -04001186 if (page == NULL) {
1187 ret = -ENOMEM;
1188 goto err;
1189 }
1190 if (!PageUptodate(page)) {
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001191 ret = -EIO;
Alex Tomasc9de5602008-01-29 00:19:52 -05001192 goto err;
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001193 }
Mel Gorman2457aec2014-06-04 16:10:31 -07001194
1195 /* Pages marked accessed already */
Alex Tomasc9de5602008-01-29 00:19:52 -05001196 e4b->bd_bitmap_page = page;
1197 e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
Alex Tomasc9de5602008-01-29 00:19:52 -05001198
1199 block++;
1200 pnum = block / blocks_per_page;
1201 poff = block % blocks_per_page;
1202
Mel Gorman2457aec2014-06-04 16:10:31 -07001203 page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
Alex Tomasc9de5602008-01-29 00:19:52 -05001204 if (page == NULL || !PageUptodate(page)) {
1205 if (page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001206 put_page(page);
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001207 page = find_or_create_page(inode->i_mapping, pnum, gfp);
Alex Tomasc9de5602008-01-29 00:19:52 -05001208 if (page) {
1209 BUG_ON(page->mapping != inode->i_mapping);
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001210 if (!PageUptodate(page)) {
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001211 ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
1212 gfp);
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001213 if (ret) {
1214 unlock_page(page);
1215 goto err;
1216 }
1217 }
Alex Tomasc9de5602008-01-29 00:19:52 -05001218 unlock_page(page);
1219 }
1220 }
Younger Liuc57ab392014-04-10 23:03:43 -04001221 if (page == NULL) {
1222 ret = -ENOMEM;
1223 goto err;
1224 }
1225 if (!PageUptodate(page)) {
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001226 ret = -EIO;
Alex Tomasc9de5602008-01-29 00:19:52 -05001227 goto err;
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001228 }
Mel Gorman2457aec2014-06-04 16:10:31 -07001229
1230 /* Pages marked accessed already */
Alex Tomasc9de5602008-01-29 00:19:52 -05001231 e4b->bd_buddy_page = page;
1232 e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
Alex Tomasc9de5602008-01-29 00:19:52 -05001233
1234 BUG_ON(e4b->bd_bitmap_page == NULL);
1235 BUG_ON(e4b->bd_buddy_page == NULL);
1236
1237 return 0;
1238
1239err:
Yang Ruirui26626f112011-04-16 19:17:48 -04001240 if (page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001241 put_page(page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001242 if (e4b->bd_bitmap_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001243 put_page(e4b->bd_bitmap_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001244 if (e4b->bd_buddy_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001245 put_page(e4b->bd_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001246 e4b->bd_buddy = NULL;
1247 e4b->bd_bitmap = NULL;
Shen Fengfdf6c7a2008-07-11 19:27:31 -04001248 return ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05001249}
1250
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04001251static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
1252 struct ext4_buddy *e4b)
1253{
1254 return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS);
1255}
1256
Jing Zhange39e07f2010-05-14 00:00:00 -04001257static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
Alex Tomasc9de5602008-01-29 00:19:52 -05001258{
1259 if (e4b->bd_bitmap_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001260 put_page(e4b->bd_bitmap_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001261 if (e4b->bd_buddy_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001262 put_page(e4b->bd_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001263}
1264
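/*
 * Typical call pattern (added sketch; error handling elided, and the scan in
 * the middle stands for whatever the caller actually does):
 *
 *	struct ext4_buddy e4b;
 *
 *	if (ext4_mb_load_buddy(sb, group, &e4b) == 0) {
 *		ext4_lock_group(sb, group);
 *		... inspect e4b.bd_bitmap / e4b.bd_buddy ...
 *		ext4_unlock_group(sb, group);
 *		ext4_mb_unload_buddy(&e4b);
 *	}
 */
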
1265
1266static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
1267{
1268 int order = 1;
Nicolai Stangeb5cb3162016-05-05 17:38:03 -04001269 int bb_incr = 1 << (e4b->bd_blkbits - 1);
Alex Tomasc9de5602008-01-29 00:19:52 -05001270 void *bb;
1271
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001272 BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
Alex Tomasc9de5602008-01-29 00:19:52 -05001273 BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));
1274
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001275 bb = e4b->bd_buddy;
Alex Tomasc9de5602008-01-29 00:19:52 -05001276 while (order <= e4b->bd_blkbits + 1) {
1277 block = block >> 1;
1278 if (!mb_test_bit(block, bb)) {
1279 /* this block is part of buddy of order 'order' */
1280 return order;
1281 }
Nicolai Stangeb5cb3162016-05-05 17:38:03 -04001282 bb += bb_incr;
1283 bb_incr >>= 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001284 order++;
1285 }
1286 return 0;
1287}
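
/*
 * Worked example for the order walk above (an illustrative note, assuming
 * a 4KiB block size, i.e. bd_blkbits == 12): the per-order buddy bitmaps
 * are packed back to back, so the order-1 bitmap occupies the first
 * 1 << (12 - 1) = 2048 bytes of bd_buddy, order 2 the next 1024 bytes,
 * and so on, which is why bb advances by bb_incr while bb_incr halves
 * each round.  For block 6 the loop tests bit 3 of the order-1 bitmap,
 * then bit 1 of the order-2 bitmap, then bit 0 of the order-3 bitmap,
 * returning the first order whose bit is clear, i.e. whose buddy chunk
 * is free.
 */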
1288
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001289static void mb_clear_bits(void *bm, int cur, int len)
Alex Tomasc9de5602008-01-29 00:19:52 -05001290{
1291 __u32 *addr;
1292
1293 len = cur + len;
1294 while (cur < len) {
1295 if ((cur & 31) == 0 && (len - cur) >= 32) {
1296 /* fast path: clear whole word at once */
1297 addr = bm + (cur >> 3);
1298 *addr = 0;
1299 cur += 32;
1300 continue;
1301 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001302 mb_clear_bit(cur, bm);
Alex Tomasc9de5602008-01-29 00:19:52 -05001303 cur++;
1304 }
1305}
1306
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001307/* clear bits in the given range;
 1308 * returns the first zero bit found, if any, -1 otherwise
1309 */
1310static int mb_test_and_clear_bits(void *bm, int cur, int len)
1311{
1312 __u32 *addr;
1313 int zero_bit = -1;
1314
1315 len = cur + len;
1316 while (cur < len) {
1317 if ((cur & 31) == 0 && (len - cur) >= 32) {
1318 /* fast path: clear whole word at once */
1319 addr = bm + (cur >> 3);
1320 if (*addr != (__u32)(-1) && zero_bit == -1)
1321 zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0);
1322 *addr = 0;
1323 cur += 32;
1324 continue;
1325 }
1326 if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1)
1327 zero_bit = cur;
1328 cur++;
1329 }
1330
1331 return zero_bit;
1332}
1333
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04001334void ext4_set_bits(void *bm, int cur, int len)
Alex Tomasc9de5602008-01-29 00:19:52 -05001335{
1336 __u32 *addr;
1337
1338 len = cur + len;
1339 while (cur < len) {
1340 if ((cur & 31) == 0 && (len - cur) >= 32) {
1341 /* fast path: set whole word at once */
1342 addr = bm + (cur >> 3);
1343 *addr = 0xffffffff;
1344 cur += 32;
1345 continue;
1346 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04001347 mb_set_bit(cur, bm);
Alex Tomasc9de5602008-01-29 00:19:52 -05001348 cur++;
1349 }
1350}
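
/*
 * A minimal user-space sketch of the fast path shared by mb_clear_bits(),
 * mb_test_and_clear_bits() and ext4_set_bits() above: whenever the cursor
 * is 32-bit aligned and at least 32 bits remain, a whole word is written
 * at once instead of looping bit by bit.  set_bit_range() is a local
 * stand-in (plain C, no atomics, little-endian bit order assumed); the
 * kernel helpers handle these details properly.
 */
static void set_bit_range(void *bm, int cur, int len)
{
	unsigned int *addr;		/* assumed to be 32 bits wide */

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set a whole 32-bit word at once */
			addr = (unsigned int *)((char *)bm + (cur >> 3));
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		/* slow path: one bit at a time */
		((unsigned char *)bm)[cur >> 3] |= 1u << (cur & 7);
		cur++;
	}
}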
1351
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001352/*
1353 * _________________________________________________________________ */
1354
1355static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side)
Alex Tomasc9de5602008-01-29 00:19:52 -05001356{
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001357 if (mb_test_bit(*bit + side, bitmap)) {
1358 mb_clear_bit(*bit, bitmap);
1359 (*bit) -= side;
1360 return 1;
1361 }
1362 else {
1363 (*bit) += side;
1364 mb_set_bit(*bit, bitmap);
1365 return -1;
1366 }
1367}
1368
1369static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last)
1370{
1371 int max;
1372 int order = 1;
1373 void *buddy = mb_find_buddy(e4b, order, &max);
1374
1375 while (buddy) {
1376 void *buddy2;
1377
1378 /* Bits in range [first; last] are known to be set since
1379 * corresponding blocks were allocated. Bits in range
 1380 * (first; last) will stay set because they form buddies on
 1381 * the upper layer. We just deal with the borders if they don't
 1382 * align with the upper layer and then go up.
 1383 * Releasing the entire group is all about clearing a
 1384 * single bit of the highest-order buddy.
1385 */
1386
1387 /* Example:
1388 * ---------------------------------
1389 * | 1 | 1 | 1 | 1 |
1390 * ---------------------------------
1391 * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 |
1392 * ---------------------------------
1393 * 0 1 2 3 4 5 6 7
1394 * \_____________________/
1395 *
 1396 * Neither [1] nor [6] is aligned to the layer above.
1397 * Left neighbour [0] is free, so mark it busy,
1398 * decrease bb_counters and extend range to
1399 * [0; 6]
 1400 * Right neighbour [7] is busy. It can't be coalesced with [6], so
1401 * mark [6] free, increase bb_counters and shrink range to
1402 * [0; 5].
1403 * Then shift range to [0; 2], go up and do the same.
1404 */
1405
1406
1407 if (first & 1)
1408 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1409 if (!(last & 1))
1410 e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1411 if (first > last)
1412 break;
1413 order++;
1414
1415 if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1416 mb_clear_bits(buddy, first, last - first + 1);
1417 e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1418 break;
1419 }
1420 first >>= 1;
1421 last >>= 1;
1422 buddy = buddy2;
1423 }
1424}
1425
1426static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1427 int first, int count)
1428{
1429 int left_is_free = 0;
1430 int right_is_free = 0;
1431 int block;
1432 int last = first + count - 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001433 struct super_block *sb = e4b->bd_sb;
1434
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04001435 if (WARN_ON(count == 0))
1436 return;
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001437 BUG_ON(last >= (sb->s_blocksize << 3));
Vincent Minetbc8e6742009-05-15 08:33:18 -04001438 assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
Darrick J. Wong163a2032013-08-28 17:35:51 -04001439 /* Don't bother if the block group is corrupt. */
1440 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1441 return;
1442
Alex Tomasc9de5602008-01-29 00:19:52 -05001443 mb_check_buddy(e4b);
1444 mb_free_blocks_double(inode, e4b, first, count);
1445
1446 e4b->bd_info->bb_free += count;
1447 if (first < e4b->bd_info->bb_first_free)
1448 e4b->bd_info->bb_first_free = first;
1449
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001450 /* access memory sequentially: check left neighbour,
1451 * clear range and then check right neighbour
1452 */
Alex Tomasc9de5602008-01-29 00:19:52 -05001453 if (first != 0)
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001454 left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1455 block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1456 if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1457 right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1458
1459 if (unlikely(block != -1)) {
Namjae Jeone43bb4e2014-06-26 10:11:53 -04001460 struct ext4_sb_info *sbi = EXT4_SB(sb);
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001461 ext4_fsblk_t blocknr;
1462
1463 blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
1464 blocknr += EXT4_C2B(EXT4_SB(sb), block);
1465 ext4_grp_locked_error(sb, e4b->bd_group,
1466 inode ? inode->i_ino : 0,
1467 blocknr,
1468 "freeing already freed block "
Darrick J. Wong163a2032013-08-28 17:35:51 -04001469 "(bit %u); block bitmap corrupt.",
1470 block);
Namjae Jeone43bb4e2014-06-26 10:11:53 -04001471 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
1472 percpu_counter_sub(&sbi->s_freeclusters_counter,
1473 e4b->bd_info->bb_free);
Darrick J. Wong163a2032013-08-28 17:35:51 -04001474 /* Mark the block group as corrupt. */
1475 set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
1476 &e4b->bd_info->bb_state);
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001477 mb_regenerate_buddy(e4b);
1478 goto done;
1479 }
1480
1481 /* let's maintain fragments counter */
1482 if (left_is_free && right_is_free)
Alex Tomasc9de5602008-01-29 00:19:52 -05001483 e4b->bd_info->bb_fragments--;
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001484 else if (!left_is_free && !right_is_free)
Alex Tomasc9de5602008-01-29 00:19:52 -05001485 e4b->bd_info->bb_fragments++;
1486
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001487 /* buddy[0] == bd_bitmap is a special case, so handle
1488 * it right away and let mb_buddy_mark_free stay free of
1489 * zero order checks.
 1490 * Check if neighbours are to be coalesced,
1491 * adjust bitmap bb_counters and borders appropriately.
1492 */
1493 if (first & 1) {
1494 first += !left_is_free;
1495 e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05001496 }
Andrey Sidoroveabe0442013-04-09 12:22:29 -04001497 if (!(last & 1)) {
1498 last -= !right_is_free;
1499 e4b->bd_info->bb_counters[0] += right_is_free ? -1 : 1;
1500 }
1501
1502 if (first <= last)
1503 mb_buddy_mark_free(e4b, first >> 1, last >> 1);
1504
1505done:
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001506 mb_set_largest_free_order(sb, e4b->bd_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05001507 mb_check_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001508}
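
/*
 * Worked example for the fragments accounting above (an illustrative
 * note): freeing clusters [4..7] while clusters 3 and 8 are both already
 * free joins the free fragments on either side into a single one, so
 * bb_fragments decreases by one; freeing the same range while both
 * neighbours are in use creates a brand new free fragment, so
 * bb_fragments increases by one; with exactly one free neighbour the
 * freed range merely extends that fragment and the counter is unchanged.
 */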
1509
Robin Dong15c006a2012-08-17 10:02:17 -04001510static int mb_find_extent(struct ext4_buddy *e4b, int block,
Alex Tomasc9de5602008-01-29 00:19:52 -05001511 int needed, struct ext4_free_extent *ex)
1512{
1513 int next = block;
Robin Dong15c006a2012-08-17 10:02:17 -04001514 int max, order;
Alex Tomasc9de5602008-01-29 00:19:52 -05001515 void *buddy;
1516
Vincent Minetbc8e6742009-05-15 08:33:18 -04001517 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -05001518 BUG_ON(ex == NULL);
1519
Robin Dong15c006a2012-08-17 10:02:17 -04001520 buddy = mb_find_buddy(e4b, 0, &max);
Alex Tomasc9de5602008-01-29 00:19:52 -05001521 BUG_ON(buddy == NULL);
1522 BUG_ON(block >= max);
1523 if (mb_test_bit(block, buddy)) {
1524 ex->fe_len = 0;
1525 ex->fe_start = 0;
1526 ex->fe_group = 0;
1527 return 0;
1528 }
1529
Robin Dong15c006a2012-08-17 10:02:17 -04001530 /* find actual order */
1531 order = mb_find_order_for_block(e4b, block);
1532 block = block >> order;
Alex Tomasc9de5602008-01-29 00:19:52 -05001533
1534 ex->fe_len = 1 << order;
1535 ex->fe_start = block << order;
1536 ex->fe_group = e4b->bd_group;
1537
1538 /* calc difference from given start */
1539 next = next - ex->fe_start;
1540 ex->fe_len -= next;
1541 ex->fe_start += next;
1542
1543 while (needed > ex->fe_len &&
Alan Coxd8ec0c32012-11-08 12:19:58 -05001544 mb_find_buddy(e4b, order, &max)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001545
1546 if (block + 1 >= max)
1547 break;
1548
1549 next = (block + 1) * (1 << order);
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001550 if (mb_test_bit(next, e4b->bd_bitmap))
Alex Tomasc9de5602008-01-29 00:19:52 -05001551 break;
1552
Robin Dongb051d8d2011-10-26 05:30:30 -04001553 order = mb_find_order_for_block(e4b, next);
Alex Tomasc9de5602008-01-29 00:19:52 -05001554
Alex Tomasc9de5602008-01-29 00:19:52 -05001555 block = next >> order;
1556 ex->fe_len += 1 << order;
1557 }
1558
1559 BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
1560 return ex->fe_len;
1561}
1562
1563static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
1564{
1565 int ord;
1566 int mlen = 0;
1567 int max = 0;
1568 int cur;
1569 int start = ex->fe_start;
1570 int len = ex->fe_len;
1571 unsigned ret = 0;
1572 int len0 = len;
1573 void *buddy;
1574
1575 BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
1576 BUG_ON(e4b->bd_group != ex->fe_group);
Vincent Minetbc8e6742009-05-15 08:33:18 -04001577 assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
Alex Tomasc9de5602008-01-29 00:19:52 -05001578 mb_check_buddy(e4b);
1579 mb_mark_used_double(e4b, start, len);
1580
1581 e4b->bd_info->bb_free -= len;
1582 if (e4b->bd_info->bb_first_free == start)
1583 e4b->bd_info->bb_first_free += len;
1584
1585 /* let's maintain fragments counter */
1586 if (start != 0)
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001587 mlen = !mb_test_bit(start - 1, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001588 if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001589 max = !mb_test_bit(start + len, e4b->bd_bitmap);
Alex Tomasc9de5602008-01-29 00:19:52 -05001590 if (mlen && max)
1591 e4b->bd_info->bb_fragments++;
1592 else if (!mlen && !max)
1593 e4b->bd_info->bb_fragments--;
1594
1595 /* let's maintain buddy itself */
1596 while (len) {
1597 ord = mb_find_order_for_block(e4b, start);
1598
1599 if (((start >> ord) << ord) == start && len >= (1 << ord)) {
1600 /* the whole chunk may be allocated at once! */
1601 mlen = 1 << ord;
1602 buddy = mb_find_buddy(e4b, ord, &max);
1603 BUG_ON((start >> ord) >= max);
1604 mb_set_bit(start >> ord, buddy);
1605 e4b->bd_info->bb_counters[ord]--;
1606 start += mlen;
1607 len -= mlen;
1608 BUG_ON(len < 0);
1609 continue;
1610 }
1611
1612 /* store for history */
1613 if (ret == 0)
1614 ret = len | (ord << 16);
1615
1616 /* we have to split large buddy */
1617 BUG_ON(ord <= 0);
1618 buddy = mb_find_buddy(e4b, ord, &max);
1619 mb_set_bit(start >> ord, buddy);
1620 e4b->bd_info->bb_counters[ord]--;
1621
1622 ord--;
1623 cur = (start >> ord) & ~1U;
1624 buddy = mb_find_buddy(e4b, ord, &max);
1625 mb_clear_bit(cur, buddy);
1626 mb_clear_bit(cur + 1, buddy);
1627 e4b->bd_info->bb_counters[ord]++;
1628 e4b->bd_info->bb_counters[ord]++;
1629 }
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04001630 mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05001631
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001632 ext4_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001633 mb_check_buddy(e4b);
1634
1635 return ret;
1636}
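
/*
 * A small sketch of the return-value encoding used by mb_mark_used()
 * above: when the request does not land on a single buddy chunk, the
 * length left at the first split and the order being split are packed
 * into one value as (order << 16) | len, and later unpacked into
 * ac_tail / ac_buddy by ext4_mb_use_best_found().  The helper names
 * below are local to this sketch.
 */
static unsigned int pack_split_info(int len, int order)
{
	return (unsigned int)len | ((unsigned int)order << 16);
}

static void unpack_split_info(unsigned int ret, int *tail, int *order)
{
	*tail = ret & 0xffff;	/* what ends up in ac->ac_tail */
	*order = ret >> 16;	/* what ends up in ac->ac_buddy */
}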
1637
1638/*
1639 * Must be called under group lock!
1640 */
1641static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
1642 struct ext4_buddy *e4b)
1643{
1644 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1645 int ret;
1646
1647 BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
1648 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1649
1650 ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
1651 ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
1652 ret = mb_mark_used(e4b, &ac->ac_b_ex);
1653
 1654 /* preallocation can change ac_b_ex, thus we store the actually
1655 * allocated blocks for history */
1656 ac->ac_f_ex = ac->ac_b_ex;
1657
1658 ac->ac_status = AC_STATUS_FOUND;
1659 ac->ac_tail = ret & 0xffff;
1660 ac->ac_buddy = ret >> 16;
1661
Aneesh Kumar K.Vc3a326a2008-11-25 15:11:52 -05001662 /*
1663 * take the page reference. We want the page to be pinned
 1664 * so that we don't get an ext4_mb_init_cache() call for this
1665 * group until we update the bitmap. That would mean we
1666 * double allocate blocks. The reference is dropped
1667 * in ext4_mb_release_context
1668 */
Alex Tomasc9de5602008-01-29 00:19:52 -05001669 ac->ac_bitmap_page = e4b->bd_bitmap_page;
1670 get_page(ac->ac_bitmap_page);
1671 ac->ac_buddy_page = e4b->bd_buddy_page;
1672 get_page(ac->ac_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05001673 /* store last allocated for subsequent stream allocation */
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04001674 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05001675 spin_lock(&sbi->s_md_lock);
1676 sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
1677 sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
1678 spin_unlock(&sbi->s_md_lock);
1679 }
1680}
1681
1682/*
1683 * regular allocator, for general purposes allocation
1684 */
1685
1686static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
1687 struct ext4_buddy *e4b,
1688 int finish_group)
1689{
1690 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1691 struct ext4_free_extent *bex = &ac->ac_b_ex;
1692 struct ext4_free_extent *gex = &ac->ac_g_ex;
1693 struct ext4_free_extent ex;
1694 int max;
1695
Aneesh Kumar K.V032115f2009-01-05 21:34:30 -05001696 if (ac->ac_status == AC_STATUS_FOUND)
1697 return;
Alex Tomasc9de5602008-01-29 00:19:52 -05001698 /*
1699 * We don't want to scan for a whole year
1700 */
1701 if (ac->ac_found > sbi->s_mb_max_to_scan &&
1702 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1703 ac->ac_status = AC_STATUS_BREAK;
1704 return;
1705 }
1706
1707 /*
 1708 * We haven't found a good chunk so far; let's continue
1709 */
1710 if (bex->fe_len < gex->fe_len)
1711 return;
1712
1713 if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
1714 && bex->fe_group == e4b->bd_group) {
1715 /* recheck chunk's availability - we don't know
1716 * when it was found (within this lock-unlock
1717 * period or not) */
Robin Dong15c006a2012-08-17 10:02:17 -04001718 max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001719 if (max >= gex->fe_len) {
1720 ext4_mb_use_best_found(ac, e4b);
1721 return;
1722 }
1723 }
1724}
1725
1726/*
 1727 * The routine checks whether the found extent is good enough. If it is,
 1728 * then the extent gets marked used and a flag is set in the context
 1729 * to stop scanning. Otherwise, the extent is compared with the
 1730 * previously found extent and, if the new one is better, it is stored
 1731 * in the context. Later, the best found extent will be used, if
 1732 * mballoc can't find a good enough extent.
1733 *
1734 * FIXME: real allocation policy is to be designed yet!
1735 */
1736static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1737 struct ext4_free_extent *ex,
1738 struct ext4_buddy *e4b)
1739{
1740 struct ext4_free_extent *bex = &ac->ac_b_ex;
1741 struct ext4_free_extent *gex = &ac->ac_g_ex;
1742
1743 BUG_ON(ex->fe_len <= 0);
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001744 BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
1745 BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05001746 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1747
1748 ac->ac_found++;
1749
1750 /*
1751 * The special case - take what you catch first
1752 */
1753 if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1754 *bex = *ex;
1755 ext4_mb_use_best_found(ac, e4b);
1756 return;
1757 }
1758
1759 /*
 1760 * Let's check whether the chunk is good enough
1761 */
1762 if (ex->fe_len == gex->fe_len) {
1763 *bex = *ex;
1764 ext4_mb_use_best_found(ac, e4b);
1765 return;
1766 }
1767
1768 /*
 1769 * If this is the first found extent, just store it in the context
1770 */
1771 if (bex->fe_len == 0) {
1772 *bex = *ex;
1773 return;
1774 }
1775
1776 /*
1777 * If new found extent is better, store it in the context
1778 */
1779 if (bex->fe_len < gex->fe_len) {
1780 /* if the request isn't satisfied, any found extent
1781 * larger than previous best one is better */
1782 if (ex->fe_len > bex->fe_len)
1783 *bex = *ex;
1784 } else if (ex->fe_len > gex->fe_len) {
1785 /* if the request is satisfied, then we try to find
 1786 * an extent that still satisfies the request, but is
 1787 * smaller than the previous one */
1788 if (ex->fe_len < bex->fe_len)
1789 *bex = *ex;
1790 }
1791
1792 ext4_mb_check_limits(ac, e4b, 0);
1793}
1794
Eric Sandeen089ceec2009-07-05 22:17:31 -04001795static noinline_for_stack
1796int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001797 struct ext4_buddy *e4b)
1798{
1799 struct ext4_free_extent ex = ac->ac_b_ex;
1800 ext4_group_t group = ex.fe_group;
1801 int max;
1802 int err;
1803
1804 BUG_ON(ex.fe_len <= 0);
1805 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1806 if (err)
1807 return err;
1808
1809 ext4_lock_group(ac->ac_sb, group);
Robin Dong15c006a2012-08-17 10:02:17 -04001810 max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001811
1812 if (max > 0) {
1813 ac->ac_b_ex = ex;
1814 ext4_mb_use_best_found(ac, e4b);
1815 }
1816
1817 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001818 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001819
1820 return 0;
1821}
1822
Eric Sandeen089ceec2009-07-05 22:17:31 -04001823static noinline_for_stack
1824int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001825 struct ext4_buddy *e4b)
1826{
1827 ext4_group_t group = ac->ac_g_ex.fe_group;
1828 int max;
1829 int err;
1830 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Yongqiang Yang838cd0c2012-09-23 23:10:51 -04001831 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05001832 struct ext4_free_extent ex;
1833
1834 if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1835 return 0;
Yongqiang Yang838cd0c2012-09-23 23:10:51 -04001836 if (grp->bb_free == 0)
1837 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05001838
1839 err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1840 if (err)
1841 return err;
1842
Darrick J. Wong163a2032013-08-28 17:35:51 -04001843 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
1844 ext4_mb_unload_buddy(e4b);
1845 return 0;
1846 }
1847
Alex Tomasc9de5602008-01-29 00:19:52 -05001848 ext4_lock_group(ac->ac_sb, group);
Robin Dong15c006a2012-08-17 10:02:17 -04001849 max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
Alex Tomasc9de5602008-01-29 00:19:52 -05001850 ac->ac_g_ex.fe_len, &ex);
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05001851 ex.fe_logical = 0xDEADFA11; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05001852
1853 if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1854 ext4_fsblk_t start;
1855
Akinobu Mita5661bd62010-03-03 23:53:39 -05001856 start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
1857 ex.fe_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05001858 /* use do_div to get remainder (would be 64-bit modulo) */
1859 if (do_div(start, sbi->s_stripe) == 0) {
1860 ac->ac_found++;
1861 ac->ac_b_ex = ex;
1862 ext4_mb_use_best_found(ac, e4b);
1863 }
1864 } else if (max >= ac->ac_g_ex.fe_len) {
1865 BUG_ON(ex.fe_len <= 0);
1866 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1867 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1868 ac->ac_found++;
1869 ac->ac_b_ex = ex;
1870 ext4_mb_use_best_found(ac, e4b);
1871 } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
 1872 /* Sometimes, the caller may want to merge even a small
 1873 * number of blocks into an existing extent */
1874 BUG_ON(ex.fe_len <= 0);
1875 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1876 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1877 ac->ac_found++;
1878 ac->ac_b_ex = ex;
1879 ext4_mb_use_best_found(ac, e4b);
1880 }
1881 ext4_unlock_group(ac->ac_sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04001882 ext4_mb_unload_buddy(e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05001883
1884 return 0;
1885}
1886
1887/*
 1888 * The routine scans buddy structures (not the bitmap!) from the given order
 1889 * to the max order and tries to find a big enough chunk to satisfy the request
1890 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001891static noinline_for_stack
1892void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001893 struct ext4_buddy *e4b)
1894{
1895 struct super_block *sb = ac->ac_sb;
1896 struct ext4_group_info *grp = e4b->bd_info;
1897 void *buddy;
1898 int i;
1899 int k;
1900 int max;
1901
1902 BUG_ON(ac->ac_2order <= 0);
1903 for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1904 if (grp->bb_counters[i] == 0)
1905 continue;
1906
1907 buddy = mb_find_buddy(e4b, i, &max);
1908 BUG_ON(buddy == NULL);
1909
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001910 k = mb_find_next_zero_bit(buddy, max, 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05001911 BUG_ON(k >= max);
1912
1913 ac->ac_found++;
1914
1915 ac->ac_b_ex.fe_len = 1 << i;
1916 ac->ac_b_ex.fe_start = k << i;
1917 ac->ac_b_ex.fe_group = e4b->bd_group;
1918
1919 ext4_mb_use_best_found(ac, e4b);
1920
1921 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1922
1923 if (EXT4_SB(sb)->s_mb_stats)
1924 atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1925
1926 break;
1927 }
1928}
1929
1930/*
1931 * The routine scans the group and measures all found extents.
 1932 * In order to optimize scanning, the caller must pass the number of
 1933 * free blocks in the group, so the routine can know the upper limit.
1934 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001935static noinline_for_stack
1936void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001937 struct ext4_buddy *e4b)
1938{
1939 struct super_block *sb = ac->ac_sb;
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05001940 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05001941 struct ext4_free_extent ex;
1942 int i;
1943 int free;
1944
1945 free = e4b->bd_info->bb_free;
1946 BUG_ON(free <= 0);
1947
1948 i = e4b->bd_info->bb_first_free;
1949
1950 while (free && ac->ac_status == AC_STATUS_CONTINUE) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05001951 i = mb_find_next_zero_bit(bitmap,
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04001952 EXT4_CLUSTERS_PER_GROUP(sb), i);
1953 if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) {
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001954 /*
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001955 * If we have a corrupt bitmap, we won't find any
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001956 * free blocks even though group info says we
 1957 * have free blocks
1958 */
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001959 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001960 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001961 "group info. But bitmap says 0",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001962 free);
Alex Tomasc9de5602008-01-29 00:19:52 -05001963 break;
1964 }
1965
Robin Dong15c006a2012-08-17 10:02:17 -04001966 mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05001967 BUG_ON(ex.fe_len <= 0);
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001968 if (free < ex.fe_len) {
Theodore Ts'oe29136f2010-06-29 12:54:28 -04001969 ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04001970 "%d free clusters as per "
Theodore Ts'ofde4d952009-01-05 22:17:35 -05001971 "group info. But got %d blocks",
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001972 free, ex.fe_len);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05001973 /*
1974 * The number of free blocks differs. This mostly
 1975 * indicates that the bitmap is corrupt. So exit
1976 * without claiming the space.
1977 */
1978 break;
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05001979 }
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05001980 ex.fe_logical = 0xDEADC0DE; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05001981 ext4_mb_measure_extent(ac, &ex, e4b);
1982
1983 i += ex.fe_len;
1984 free -= ex.fe_len;
1985 }
1986
1987 ext4_mb_check_limits(ac, e4b, 1);
1988}
1989
1990/*
 1991 * This is a special case for storage arrays like raid5;
Eric Sandeen506bf2d2010-07-27 11:56:06 -04001992 * we try to find stripe-aligned chunks for stripe-size-multiple requests
Alex Tomasc9de5602008-01-29 00:19:52 -05001993 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04001994static noinline_for_stack
1995void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05001996 struct ext4_buddy *e4b)
1997{
1998 struct super_block *sb = ac->ac_sb;
1999 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'oc5e8f3f2012-02-20 17:54:06 -05002000 void *bitmap = e4b->bd_bitmap;
Alex Tomasc9de5602008-01-29 00:19:52 -05002001 struct ext4_free_extent ex;
2002 ext4_fsblk_t first_group_block;
2003 ext4_fsblk_t a;
2004 ext4_grpblk_t i;
2005 int max;
2006
2007 BUG_ON(sbi->s_stripe == 0);
2008
2009 /* find first stripe-aligned block in group */
Akinobu Mita5661bd62010-03-03 23:53:39 -05002010 first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
2011
Alex Tomasc9de5602008-01-29 00:19:52 -05002012 a = first_group_block + sbi->s_stripe - 1;
2013 do_div(a, sbi->s_stripe);
2014 i = (a * sbi->s_stripe) - first_group_block;
2015
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04002016 while (i < EXT4_CLUSTERS_PER_GROUP(sb)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002017 if (!mb_test_bit(i, bitmap)) {
Robin Dong15c006a2012-08-17 10:02:17 -04002018 max = mb_find_extent(e4b, i, sbi->s_stripe, &ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05002019 if (max >= sbi->s_stripe) {
2020 ac->ac_found++;
Theodore Ts'oab0c00f2014-02-20 00:36:41 -05002021 ex.fe_logical = 0xDEADF00D; /* debug value */
Alex Tomasc9de5602008-01-29 00:19:52 -05002022 ac->ac_b_ex = ex;
2023 ext4_mb_use_best_found(ac, e4b);
2024 break;
2025 }
2026 }
2027 i += sbi->s_stripe;
2028 }
2029}
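
/*
 * Worked example for the alignment math above (an illustrative sketch):
 * the goal is to round the group's first block up to the next multiple of
 * s_stripe.  With first_group_block = 98307 and s_stripe = 16,
 * a = (98307 + 15) / 16 = 6145, so the scan starts at offset
 * i = 6145 * 16 - 98307 = 13 inside the group and then advances in steps
 * of s_stripe.  round_up_to_stripe() is local to this sketch, not a
 * kernel helper.
 */
static unsigned long long round_up_to_stripe(unsigned long long block,
					     unsigned int stripe)
{
	return ((block + stripe - 1) / stripe) * stripe;
}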
2030
Lukas Czerner42ac1842015-06-08 11:40:40 -04002031/*
2032 * This is now called BEFORE we load the buddy bitmap.
2033 * Returns either 1 or 0 indicating that the group is either suitable
 2034 * for the allocation or not. In addition it can also return a negative
2035 * error code when something goes wrong.
2036 */
Alex Tomasc9de5602008-01-29 00:19:52 -05002037static int ext4_mb_good_group(struct ext4_allocation_context *ac,
2038 ext4_group_t group, int cr)
2039{
2040 unsigned free, fragments;
Theodore Ts'oa4912122009-03-12 12:18:34 -04002041 int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05002042 struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
2043
2044 BUG_ON(cr < 0 || cr >= 4);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002045
Theodore Ts'o01fc48e2012-08-17 09:46:17 -04002046 free = grp->bb_free;
2047 if (free == 0)
2048 return 0;
2049 if (cr <= 2 && free < ac->ac_g_ex.fe_len)
2050 return 0;
2051
Darrick J. Wong163a2032013-08-28 17:35:51 -04002052 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
2053 return 0;
2054
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002055 /* We only do this if the grp has never been initialized */
2056 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04002057 int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002058 if (ret)
Lukas Czerner42ac1842015-06-08 11:40:40 -04002059 return ret;
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002060 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002061
Alex Tomasc9de5602008-01-29 00:19:52 -05002062 fragments = grp->bb_fragments;
Alex Tomasc9de5602008-01-29 00:19:52 -05002063 if (fragments == 0)
2064 return 0;
2065
2066 switch (cr) {
2067 case 0:
2068 BUG_ON(ac->ac_2order == 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05002069
Theodore Ts'oa4912122009-03-12 12:18:34 -04002070 /* Avoid using the first bg of a flexgroup for data files */
2071 if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
2072 (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
2073 ((group % flex_size) == 0))
2074 return 0;
2075
Theodore Ts'o40ae3482013-02-04 15:08:40 -05002076 if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
2077 (free / fragments) >= ac->ac_g_ex.fe_len)
2078 return 1;
2079
2080 if (grp->bb_largest_free_order < ac->ac_2order)
2081 return 0;
2082
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002083 return 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002084 case 1:
2085 if ((free / fragments) >= ac->ac_g_ex.fe_len)
2086 return 1;
2087 break;
2088 case 2:
2089 if (free >= ac->ac_g_ex.fe_len)
2090 return 1;
2091 break;
2092 case 3:
2093 return 1;
2094 default:
2095 BUG();
2096 }
2097
2098 return 0;
2099}
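
/*
 * Summary of the cr passes evaluated above (descriptive note): every pass
 * skips groups with bb_free == 0 or a corrupt block bitmap, and cr 0-2
 * additionally require at least ac_g_ex.fe_len free clusters.  cr 0 then
 * wants the buddy data to suggest that the power-of-two request can be
 * served (largest free order or average free extent big enough) and
 * avoids the first group of a flex group for data allocations; cr 1 wants
 * the average free extent (free / fragments) to reach the goal length;
 * cr 2 is satisfied by the total free count alone; cr 3 accepts any
 * group as a last resort.
 */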
2100
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002101static noinline_for_stack int
2102ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05002103{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002104 ext4_group_t ngroups, group, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05002105 int cr;
Lukas Czerner42ac1842015-06-08 11:40:40 -04002106 int err = 0, first_err = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002107 struct ext4_sb_info *sbi;
2108 struct super_block *sb;
2109 struct ext4_buddy e4b;
Alex Tomasc9de5602008-01-29 00:19:52 -05002110
2111 sb = ac->ac_sb;
2112 sbi = EXT4_SB(sb);
Theodore Ts'o8df96752009-05-01 08:50:38 -04002113 ngroups = ext4_get_groups_count(sb);
Eric Sandeenfb0a3872009-09-16 14:45:10 -04002114 /* non-extent files are limited to low blocks/groups */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04002115 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04002116 ngroups = sbi->s_blockfile_groups;
2117
Alex Tomasc9de5602008-01-29 00:19:52 -05002118 BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2119
2120 /* first, try the goal */
2121 err = ext4_mb_find_by_goal(ac, &e4b);
2122 if (err || ac->ac_status == AC_STATUS_FOUND)
2123 goto out;
2124
2125 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2126 goto out;
2127
2128 /*
2129 * ac->ac2_order is set only if the fe_len is a power of 2
2130 * if ac2_order is set we also set criteria to 0 so that we
2131 * try exact allocation using buddy.
2132 */
2133 i = fls(ac->ac_g_ex.fe_len);
2134 ac->ac_2order = 0;
2135 /*
2136 * We search using buddy data only if the order of the request
 2137 * is greater than or equal to sbi->s_mb_order2_reqs.
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04002138 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
Jan Kara0e9deca2017-10-07 22:36:49 +00002139 * We also support searching for power-of-two requests only for
 2140 * requests up to the maximum buddy size we have constructed.
Alex Tomasc9de5602008-01-29 00:19:52 -05002141 */
Jan Kara0e9deca2017-10-07 22:36:49 +00002142 if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002143 /*
2144 * This should tell if fe_len is exactly power of 2
2145 */
2146 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
2147 ac->ac_2order = i - 1;
2148 }
2149
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04002150 /* if stream allocation is enabled, use global goal */
2151 if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002152 /* TBD: may be hot point */
2153 spin_lock(&sbi->s_md_lock);
2154 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
2155 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
2156 spin_unlock(&sbi->s_md_lock);
2157 }
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04002158
Alex Tomasc9de5602008-01-29 00:19:52 -05002159 /* Let's just scan groups to find more or less suitable blocks */
2160 cr = ac->ac_2order ? 0 : 1;
2161 /*
2162 * cr == 0 try to get exact allocation,
2163 * cr == 3 try to get anything
2164 */
2165repeat:
2166 for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
2167 ac->ac_criteria = cr;
Aneesh Kumar K.Ved8f9c72008-07-11 19:27:31 -04002168 /*
2169 * searching for the right group start
2170 * from the goal value specified
2171 */
2172 group = ac->ac_g_ex.fe_group;
2173
Theodore Ts'o8df96752009-05-01 08:50:38 -04002174 for (i = 0; i < ngroups; group++, i++) {
Lukas Czerner42ac1842015-06-08 11:40:40 -04002175 int ret = 0;
Theodore Ts'o2ed57242013-06-12 11:43:02 -04002176 cond_resched();
Lachlan McIlroye6155732013-05-05 23:10:00 -04002177 /*
2178 * Artificially restricted ngroups for non-extent
2179 * files makes group > ngroups possible on first loop.
2180 */
2181 if (group >= ngroups)
Alex Tomasc9de5602008-01-29 00:19:52 -05002182 group = 0;
2183
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002184 /* This now checks without needing the buddy page */
Lukas Czerner42ac1842015-06-08 11:40:40 -04002185 ret = ext4_mb_good_group(ac, group, cr);
2186 if (ret <= 0) {
2187 if (!first_err)
2188 first_err = ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002189 continue;
Lukas Czerner42ac1842015-06-08 11:40:40 -04002190 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002191
Alex Tomasc9de5602008-01-29 00:19:52 -05002192 err = ext4_mb_load_buddy(sb, group, &e4b);
2193 if (err)
2194 goto out;
2195
2196 ext4_lock_group(sb, group);
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002197
2198 /*
2199 * We need to check again after locking the
2200 * block group
2201 */
Lukas Czerner42ac1842015-06-08 11:40:40 -04002202 ret = ext4_mb_good_group(ac, group, cr);
2203 if (ret <= 0) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002204 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002205 ext4_mb_unload_buddy(&e4b);
Lukas Czerner42ac1842015-06-08 11:40:40 -04002206 if (!first_err)
2207 first_err = ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002208 continue;
2209 }
2210
2211 ac->ac_groups_scanned++;
Jan Kara0e9deca2017-10-07 22:36:49 +00002212 if (cr == 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002213 ext4_mb_simple_scan_group(ac, &e4b);
Eric Sandeen506bf2d2010-07-27 11:56:06 -04002214 else if (cr == 1 && sbi->s_stripe &&
2215 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
Alex Tomasc9de5602008-01-29 00:19:52 -05002216 ext4_mb_scan_aligned(ac, &e4b);
2217 else
2218 ext4_mb_complex_scan_group(ac, &e4b);
2219
2220 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04002221 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002222
2223 if (ac->ac_status != AC_STATUS_CONTINUE)
2224 break;
2225 }
2226 }
2227
2228 if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
2229 !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2230 /*
2231 * We've been searching too long. Let's try to allocate
2232 * the best chunk we've found so far
2233 */
2234
2235 ext4_mb_try_best_found(ac, &e4b);
2236 if (ac->ac_status != AC_STATUS_FOUND) {
2237 /*
 2238 * Someone luckier has already allocated it.
 2239 * The only thing we can do is just take the first
 2240 * found block(s)
2241 printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
2242 */
2243 ac->ac_b_ex.fe_group = 0;
2244 ac->ac_b_ex.fe_start = 0;
2245 ac->ac_b_ex.fe_len = 0;
2246 ac->ac_status = AC_STATUS_CONTINUE;
2247 ac->ac_flags |= EXT4_MB_HINT_FIRST;
2248 cr = 3;
2249 atomic_inc(&sbi->s_mb_lost_chunks);
2250 goto repeat;
2251 }
2252 }
2253out:
Lukas Czerner42ac1842015-06-08 11:40:40 -04002254 if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
2255 err = first_err;
Alex Tomasc9de5602008-01-29 00:19:52 -05002256 return err;
2257}
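
/*
 * A small sketch of the power-of-two test used when setting ac_2order
 * above (subject to the s_mb_order2_reqs bounds already checked there),
 * with fls_len standing for the kernel's fls(len), e.g. fls(64) == 7.
 * For a goal of 64 clusters, 64 & ~(1 << 6) == 0, so the order becomes 6
 * and the scan starts at cr 0; for 96 clusters the test fails and the
 * scan starts at cr 1.  order_if_power_of_two() is local to this sketch.
 */
static int order_if_power_of_two(unsigned int len, int fls_len)
{
	/* returns the order if len is a power of two, -1 otherwise */
	if (len && (len & ~(1u << (fls_len - 1))) == 0)
		return fls_len - 1;
	return -1;
}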
2258
Alex Tomasc9de5602008-01-29 00:19:52 -05002259static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2260{
2261 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002262 ext4_group_t group;
2263
Theodore Ts'o8df96752009-05-01 08:50:38 -04002264 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002265 return NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002266 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002267 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002268}
2269
2270static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2271{
2272 struct super_block *sb = seq->private;
Alex Tomasc9de5602008-01-29 00:19:52 -05002273 ext4_group_t group;
2274
2275 ++*pos;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002276 if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
Alex Tomasc9de5602008-01-29 00:19:52 -05002277 return NULL;
2278 group = *pos + 1;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002279 return (void *) ((unsigned long) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002280}
2281
2282static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2283{
2284 struct super_block *sb = seq->private;
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002285 ext4_group_t group = (ext4_group_t) ((unsigned long) v);
Alex Tomasc9de5602008-01-29 00:19:52 -05002286 int i;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002287 int err, buddy_loaded = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002288 struct ext4_buddy e4b;
Aditya Kali1c8457c2012-06-30 19:10:57 -04002289 struct ext4_group_info *grinfo;
Alex Tomasc9de5602008-01-29 00:19:52 -05002290 struct sg {
2291 struct ext4_group_info info;
Chandan Rajendrab493c712016-11-14 21:26:26 -05002292 ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
Alex Tomasc9de5602008-01-29 00:19:52 -05002293 } sg;
2294
2295 group--;
2296 if (group == 0)
Rasmus Villemoes97b4af22015-06-15 00:32:58 -04002297 seq_puts(seq, "#group: free frags first ["
2298 " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
Huaitong Han802cf1f2016-02-12 00:17:16 -05002299 " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
Alex Tomasc9de5602008-01-29 00:19:52 -05002300
2301 i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2302 sizeof(struct ext4_group_info);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002303 grinfo = ext4_get_group_info(sb, group);
2304 /* Load the group info in memory only if not already loaded. */
2305 if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
2306 err = ext4_mb_load_buddy(sb, group, &e4b);
2307 if (err) {
2308 seq_printf(seq, "#%-5u: I/O error\n", group);
2309 return 0;
2310 }
2311 buddy_loaded = 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002312 }
Aditya Kali1c8457c2012-06-30 19:10:57 -04002313
Alex Tomasc9de5602008-01-29 00:19:52 -05002314 memcpy(&sg, ext4_get_group_info(sb, group), i);
Aditya Kali1c8457c2012-06-30 19:10:57 -04002315
2316 if (buddy_loaded)
2317 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002318
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002319 seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
Alex Tomasc9de5602008-01-29 00:19:52 -05002320 sg.info.bb_fragments, sg.info.bb_first_free);
2321 for (i = 0; i <= 13; i++)
2322 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2323 sg.info.bb_counters[i] : 0);
2324 seq_printf(seq, " ]\n");
2325
2326 return 0;
2327}
2328
2329static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2330{
2331}
2332
Tobias Klauser7f1346a2009-09-05 09:28:54 -04002333static const struct seq_operations ext4_mb_seq_groups_ops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002334 .start = ext4_mb_seq_groups_start,
2335 .next = ext4_mb_seq_groups_next,
2336 .stop = ext4_mb_seq_groups_stop,
2337 .show = ext4_mb_seq_groups_show,
2338};
2339
2340static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2341{
Al Virod9dda782013-03-31 18:16:14 -04002342 struct super_block *sb = PDE_DATA(inode);
Alex Tomasc9de5602008-01-29 00:19:52 -05002343 int rc;
2344
2345 rc = seq_open(file, &ext4_mb_seq_groups_ops);
2346 if (rc == 0) {
Joe Perchesa271fe82010-07-27 11:56:04 -04002347 struct seq_file *m = file->private_data;
Alex Tomasc9de5602008-01-29 00:19:52 -05002348 m->private = sb;
2349 }
2350 return rc;
2351
2352}
2353
Theodore Ts'oebd173b2015-09-23 12:46:17 -04002354const struct file_operations ext4_seq_mb_groups_fops = {
Alex Tomasc9de5602008-01-29 00:19:52 -05002355 .open = ext4_mb_seq_groups_open,
2356 .read = seq_read,
2357 .llseek = seq_lseek,
2358 .release = seq_release,
2359};
2360
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002361static struct kmem_cache *get_groupinfo_cache(int blocksize_bits)
2362{
2363 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2364 struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index];
2365
2366 BUG_ON(!cachep);
2367 return cachep;
2368}
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002369
Theodore Ts'o28623c22012-09-05 01:31:50 -04002370/*
2371 * Allocate the top-level s_group_info array for the specified number
2372 * of groups
2373 */
2374int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2375{
2376 struct ext4_sb_info *sbi = EXT4_SB(sb);
2377 unsigned size;
2378 struct ext4_group_info ***new_groupinfo;
2379
2380 size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
2381 EXT4_DESC_PER_BLOCK_BITS(sb);
2382 if (size <= sbi->s_group_info_size)
2383 return 0;
2384
2385 size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size);
2386 new_groupinfo = ext4_kvzalloc(size, GFP_KERNEL);
2387 if (!new_groupinfo) {
2388 ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
2389 return -ENOMEM;
2390 }
2391 if (sbi->s_group_info) {
2392 memcpy(new_groupinfo, sbi->s_group_info,
2393 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
Al Virob93b41d2014-11-20 12:19:11 -05002394 kvfree(sbi->s_group_info);
Theodore Ts'o28623c22012-09-05 01:31:50 -04002395 }
2396 sbi->s_group_info = new_groupinfo;
2397 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
2398 ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
2399 sbi->s_group_info_size);
2400 return 0;
2401}
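
/*
 * Worked example for the sizing above (an illustrative note, assuming a
 * 4KiB block size and the default 32-byte group descriptors, so
 * EXT4_DESC_PER_BLOCK(sb) == 128): a filesystem with 100,000 block groups
 * needs ceil(100000 / 128) = 782 second-level tables; the array of
 * pointers to them is rounded up to a power-of-two allocation (1024
 * pointers, 8KiB on 64-bit), and s_group_info_size then records how many
 * slots that allocation can hold.
 */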
2402
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002403/* Create and initialize ext4_group_info data for the given group. */
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002404int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002405 struct ext4_group_desc *desc)
2406{
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002407 int i;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002408 int metalen = 0;
2409 struct ext4_sb_info *sbi = EXT4_SB(sb);
2410 struct ext4_group_info **meta_group_info;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002411 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002412
2413 /*
2414 * First check if this group is the first of a reserved block.
2415 * If it's true, we have to allocate a new table of pointers
2416 * to ext4_group_info structures
2417 */
2418 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2419 metalen = sizeof(*meta_group_info) <<
2420 EXT4_DESC_PER_BLOCK_BITS(sb);
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002421 meta_group_info = kmalloc(metalen, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002422 if (meta_group_info == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002423 ext4_msg(sb, KERN_ERR, "can't allocate mem "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002424 "for a buddy group");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002425 goto exit_meta_group_info;
2426 }
2427 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2428 meta_group_info;
2429 }
2430
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002431 meta_group_info =
2432 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2433 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2434
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002435 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002436 if (meta_group_info[i] == NULL) {
Joe Perches7f6a11e2012-03-19 23:09:43 -04002437 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002438 goto exit_group_info;
2439 }
2440 set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2441 &(meta_group_info[i]->bb_state));
2442
2443 /*
2444 * initialize bb_free to be able to skip
2445 * empty groups without initialization
2446 */
2447 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2448 meta_group_info[i]->bb_free =
Theodore Ts'ocff1dfd72011-09-09 19:12:51 -04002449 ext4_free_clusters_after_init(sb, group, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002450 } else {
2451 meta_group_info[i]->bb_free =
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002452 ext4_free_group_clusters(sb, desc);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002453 }
2454
2455 INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05002456 init_rwsem(&meta_group_info[i]->alloc_sem);
Venkatesh Pallipadi64e290e2010-03-04 22:25:21 -05002457 meta_group_info[i]->bb_free_root = RB_ROOT;
Curt Wohlgemuth8a57d9d2010-05-16 15:00:00 -04002458 meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002459
2460#ifdef DOUBLE_CHECK
2461 {
2462 struct buffer_head *bh;
2463 meta_group_info[i]->bb_bitmap =
Dmitry Monakhov4fdb5542014-11-25 13:08:04 -05002464 kmalloc(sb->s_blocksize, GFP_NOFS);
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002465 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2466 bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04002467 BUG_ON(IS_ERR_OR_NULL(bh));
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002468 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2469 sb->s_blocksize);
2470 put_bh(bh);
2471 }
2472#endif
2473
2474 return 0;
2475
2476exit_group_info:
2477 /* If a meta_group_info table has been allocated, release it now */
Tao Macaaf7a22011-07-11 18:42:42 -04002478 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002479 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
Tao Macaaf7a22011-07-11 18:42:42 -04002480 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
2481 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002482exit_meta_group_info:
2483 return -ENOMEM;
2484} /* ext4_mb_add_groupinfo */
2485
Alex Tomasc9de5602008-01-29 00:19:52 -05002486static int ext4_mb_init_backend(struct super_block *sb)
2487{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002488 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002489 ext4_group_t i;
Alex Tomasc9de5602008-01-29 00:19:52 -05002490 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o28623c22012-09-05 01:31:50 -04002491 int err;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002492 struct ext4_group_desc *desc;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002493 struct kmem_cache *cachep;
Alex Tomasc9de5602008-01-29 00:19:52 -05002494
Theodore Ts'o28623c22012-09-05 01:31:50 -04002495 err = ext4_mb_alloc_groupinfo(sb, ngroups);
2496 if (err)
2497 return err;
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002498
Alex Tomasc9de5602008-01-29 00:19:52 -05002499 sbi->s_buddy_cache = new_inode(sb);
2500 if (sbi->s_buddy_cache == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002501 ext4_msg(sb, KERN_ERR, "can't get new inode");
Alex Tomasc9de5602008-01-29 00:19:52 -05002502 goto err_freesgi;
2503 }
Yu Jian48e60612011-08-01 17:41:39 -04002504 /* To avoid potentially colliding with a valid on-disk inode number,
2505 * use EXT4_BAD_INO for the buddy cache inode number. This inode is
2506 * not in the inode hash, so it should never be found by iget(), but
2507 * this will avoid confusion if it ever shows up during debugging. */
2508 sbi->s_buddy_cache->i_ino = EXT4_BAD_INO;
Alex Tomasc9de5602008-01-29 00:19:52 -05002509 EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
Theodore Ts'o8df96752009-05-01 08:50:38 -04002510 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002511 desc = ext4_get_group_desc(sb, i, NULL);
2512 if (desc == NULL) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002513 ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002514 goto err_freebuddy;
2515 }
Frederic Bohe5f21b0e2008-07-11 19:27:31 -04002516 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2517 goto err_freebuddy;
Alex Tomasc9de5602008-01-29 00:19:52 -05002518 }
2519
2520 return 0;
2521
2522err_freebuddy:
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002523 cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Roel Kluinf1fa3342008-04-29 22:01:15 -04002524 while (i-- > 0)
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002525 kmem_cache_free(cachep, ext4_get_group_info(sb, i));
Theodore Ts'o28623c22012-09-05 01:31:50 -04002526 i = sbi->s_group_info_size;
Roel Kluinf1fa3342008-04-29 22:01:15 -04002527 while (i-- > 0)
Alex Tomasc9de5602008-01-29 00:19:52 -05002528 kfree(sbi->s_group_info[i]);
2529 iput(sbi->s_buddy_cache);
2530err_freesgi:
Al Virob93b41d2014-11-20 12:19:11 -05002531 kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002532 return -ENOMEM;
2533}
2534
Eric Sandeen2892c152011-02-12 08:12:18 -05002535static void ext4_groupinfo_destroy_slabs(void)
2536{
2537 int i;
2538
2539 for (i = 0; i < NR_GRPINFO_CACHES; i++) {
2540 if (ext4_groupinfo_caches[i])
2541 kmem_cache_destroy(ext4_groupinfo_caches[i]);
2542 ext4_groupinfo_caches[i] = NULL;
2543 }
2544}
2545
2546static int ext4_groupinfo_create_slab(size_t size)
2547{
2548 static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
2549 int slab_size;
2550 int blocksize_bits = order_base_2(size);
2551 int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
2552 struct kmem_cache *cachep;
2553
2554 if (cache_index >= NR_GRPINFO_CACHES)
2555 return -EINVAL;
2556
2557 if (unlikely(cache_index < 0))
2558 cache_index = 0;
2559
2560 mutex_lock(&ext4_grpinfo_slab_create_mutex);
2561 if (ext4_groupinfo_caches[cache_index]) {
2562 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2563 return 0; /* Already created */
2564 }
2565
2566 slab_size = offsetof(struct ext4_group_info,
2567 bb_counters[blocksize_bits + 2]);
2568
2569 cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
2570 slab_size, 0, SLAB_RECLAIM_ACCOUNT,
2571 NULL);
2572
Tao Ma823ba012011-07-11 18:26:01 -04002573 ext4_groupinfo_caches[cache_index] = cachep;
2574
Eric Sandeen2892c152011-02-12 08:12:18 -05002575 mutex_unlock(&ext4_grpinfo_slab_create_mutex);
2576 if (!cachep) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002577 printk(KERN_EMERG
2578 "EXT4-fs: no memory for groupinfo slab cache\n");
Eric Sandeen2892c152011-02-12 08:12:18 -05002579 return -ENOMEM;
2580 }
2581
Eric Sandeen2892c152011-02-12 08:12:18 -05002582 return 0;
2583}
2584
Akira Fujita9d990122012-05-28 14:19:25 -04002585int ext4_mb_init(struct super_block *sb)
Alex Tomasc9de5602008-01-29 00:19:52 -05002586{
2587 struct ext4_sb_info *sbi = EXT4_SB(sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002588 unsigned i, j;
Nicolai Stange935244c2016-05-05 19:46:19 -04002589 unsigned offset, offset_incr;
Alex Tomasc9de5602008-01-29 00:19:52 -05002590 unsigned max;
Shen Feng74767c52008-07-11 19:27:31 -04002591 int ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002592
Eric Sandeen19278052009-08-25 22:36:25 -04002593 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
Alex Tomasc9de5602008-01-29 00:19:52 -05002594
2595 sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2596 if (sbi->s_mb_offsets == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002597 ret = -ENOMEM;
2598 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002599 }
Yasunori Gotoff7ef322008-12-17 00:48:39 -05002600
Eric Sandeen19278052009-08-25 22:36:25 -04002601 i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_maxs);
Alex Tomasc9de5602008-01-29 00:19:52 -05002602 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2603 if (sbi->s_mb_maxs == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002604 ret = -ENOMEM;
2605 goto out;
2606 }
2607
Eric Sandeen2892c152011-02-12 08:12:18 -05002608 ret = ext4_groupinfo_create_slab(sb->s_blocksize);
2609 if (ret < 0)
2610 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002611
2612 /* order 0 is regular bitmap */
2613 sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2614 sbi->s_mb_offsets[0] = 0;
2615
2616 i = 1;
2617 offset = 0;
Nicolai Stange935244c2016-05-05 19:46:19 -04002618 offset_incr = 1 << (sb->s_blocksize_bits - 1);
Alex Tomasc9de5602008-01-29 00:19:52 -05002619 max = sb->s_blocksize << 2;
2620 do {
2621 sbi->s_mb_offsets[i] = offset;
2622 sbi->s_mb_maxs[i] = max;
Nicolai Stange935244c2016-05-05 19:46:19 -04002623 offset += offset_incr;
2624 offset_incr = offset_incr >> 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05002625 max = max >> 1;
2626 i++;
2627 } while (i <= sb->s_blocksize_bits + 1);
2628
Alex Tomasc9de5602008-01-29 00:19:52 -05002629 spin_lock_init(&sbi->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05002630 spin_lock_init(&sbi->s_bal_lock);
Theodore Ts'od08854f2016-06-26 18:24:01 -04002631 sbi->s_mb_free_pending = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002632
2633 sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2634 sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2635 sbi->s_mb_stats = MB_DEFAULT_STATS;
2636 sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2637 sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
Theodore Ts'o27baebb2011-09-09 19:02:51 -04002638 /*
2639 * The default group preallocation is 512, which for 4k block
2640 * sizes translates to 2 megabytes. However for bigalloc file
 2641 * systems, this is probably too big (i.e., if the cluster size
2642 * is 1 megabyte, then group preallocation size becomes half a
2643 * gigabyte!). As a default, we will keep a two megabyte
2644 * group pralloc size for cluster sizes up to 64k, and after
2645 * that, we will force a minimum group preallocation size of
2646 * 32 clusters. This translates to 8 megs when the cluster
2647 * size is 256k, and 32 megs when the cluster size is 1 meg,
2648 * which seems reasonable as a default.
2649 */
2650 sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
2651 sbi->s_cluster_bits, 32);
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04002652 /*
 2653 * If there is an s_stripe > 1, then we set the s_mb_group_prealloc
2654 * to the lowest multiple of s_stripe which is bigger than
2655 * the s_mb_group_prealloc as determined above. We want
2656 * the preallocation size to be an exact multiple of the
2657 * RAID stripe size so that preallocations don't fragment
2658 * the stripes.
2659 */
2660 if (sbi->s_stripe > 1) {
2661 sbi->s_mb_group_prealloc = roundup(
2662 sbi->s_mb_group_prealloc, sbi->s_stripe);
2663 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002664
Eric Sandeen730c2132008-09-13 15:23:29 -04002665 sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002666 if (sbi->s_locality_groups == NULL) {
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002667 ret = -ENOMEM;
Andrey Tsyvarev029b10c2014-05-12 12:34:21 -04002668 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05002669 }
Eric Sandeen730c2132008-09-13 15:23:29 -04002670 for_each_possible_cpu(i) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002671 struct ext4_locality_group *lg;
Eric Sandeen730c2132008-09-13 15:23:29 -04002672 lg = per_cpu_ptr(sbi->s_locality_groups, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05002673 mutex_init(&lg->lg_mutex);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04002674 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2675 INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
Alex Tomasc9de5602008-01-29 00:19:52 -05002676 spin_lock_init(&lg->lg_prealloc_lock);
2677 }
2678
Yu Jian79a77c52011-08-01 17:41:46 -04002679 /* init file for buddy data */
2680 ret = ext4_mb_init_backend(sb);
Tao Ma7aa0bae2011-10-06 10:22:28 -04002681 if (ret != 0)
2682 goto out_free_locality_groups;
Yu Jian79a77c52011-08-01 17:41:46 -04002683
Tao Ma7aa0bae2011-10-06 10:22:28 -04002684 return 0;
2685
2686out_free_locality_groups:
2687 free_percpu(sbi->s_locality_groups);
2688 sbi->s_locality_groups = NULL;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002689out:
Tao Ma7aa0bae2011-10-06 10:22:28 -04002690 kfree(sbi->s_mb_offsets);
2691 sbi->s_mb_offsets = NULL;
2692 kfree(sbi->s_mb_maxs);
2693 sbi->s_mb_maxs = NULL;
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002694 return ret;
Alex Tomasc9de5602008-01-29 00:19:52 -05002695}
2696
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002697/* needs to be called with the ext4 group lock held */
Alex Tomasc9de5602008-01-29 00:19:52 -05002698static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2699{
2700 struct ext4_prealloc_space *pa;
2701 struct list_head *cur, *tmp;
2702 int count = 0;
2703
2704 list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2705 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2706 list_del(&pa->pa_group_list);
2707 count++;
Aneesh Kumar K.V688f05a2008-10-13 12:14:14 -04002708 kmem_cache_free(ext4_pspace_cachep, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05002709 }
2710 if (count)
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002711 mb_debug(1, "mballoc: %u PAs left\n", count);
Alex Tomasc9de5602008-01-29 00:19:52 -05002712
2713}
2714
2715int ext4_mb_release(struct super_block *sb)
2716{
Theodore Ts'o8df96752009-05-01 08:50:38 -04002717 ext4_group_t ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002718 ext4_group_t i;
2719 int num_meta_group_infos;
2720 struct ext4_group_info *grinfo;
2721 struct ext4_sb_info *sbi = EXT4_SB(sb);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002722 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
Alex Tomasc9de5602008-01-29 00:19:52 -05002723
Alex Tomasc9de5602008-01-29 00:19:52 -05002724 if (sbi->s_group_info) {
Theodore Ts'o8df96752009-05-01 08:50:38 -04002725 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05002726 grinfo = ext4_get_group_info(sb, i);
2727#ifdef DOUBLE_CHECK
2728 kfree(grinfo->bb_bitmap);
2729#endif
2730 ext4_lock_group(sb, i);
2731 ext4_mb_cleanup_pa(grinfo);
2732 ext4_unlock_group(sb, i);
Curt Wohlgemuthfb1813f2010-10-27 21:29:12 -04002733 kmem_cache_free(cachep, grinfo);
Alex Tomasc9de5602008-01-29 00:19:52 -05002734 }
Theodore Ts'o8df96752009-05-01 08:50:38 -04002735 num_meta_group_infos = (ngroups +
Alex Tomasc9de5602008-01-29 00:19:52 -05002736 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2737 EXT4_DESC_PER_BLOCK_BITS(sb);
2738 for (i = 0; i < num_meta_group_infos; i++)
2739 kfree(sbi->s_group_info[i]);
Al Virob93b41d2014-11-20 12:19:11 -05002740 kvfree(sbi->s_group_info);
Alex Tomasc9de5602008-01-29 00:19:52 -05002741 }
2742 kfree(sbi->s_mb_offsets);
2743 kfree(sbi->s_mb_maxs);
Markus Elfringbfcba2d2014-11-25 20:01:37 -05002744 iput(sbi->s_buddy_cache);
Alex Tomasc9de5602008-01-29 00:19:52 -05002745 if (sbi->s_mb_stats) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002746 ext4_msg(sb, KERN_INFO,
2747 "mballoc: %u blocks %u reqs (%u success)",
Alex Tomasc9de5602008-01-29 00:19:52 -05002748 atomic_read(&sbi->s_bal_allocated),
2749 atomic_read(&sbi->s_bal_reqs),
2750 atomic_read(&sbi->s_bal_success));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002751 ext4_msg(sb, KERN_INFO,
2752 "mballoc: %u extents scanned, %u goal hits, "
2753 "%u 2^N hits, %u breaks, %u lost",
Alex Tomasc9de5602008-01-29 00:19:52 -05002754 atomic_read(&sbi->s_bal_ex_scanned),
2755 atomic_read(&sbi->s_bal_goals),
2756 atomic_read(&sbi->s_bal_2orders),
2757 atomic_read(&sbi->s_bal_breaks),
2758 atomic_read(&sbi->s_mb_lost_chunks));
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002759 ext4_msg(sb, KERN_INFO,
2760		"mballoc: %lu buddies generated and it took %Lu",
Tao Maced156e2011-07-23 16:18:05 -04002761 sbi->s_mb_buddies_generated,
Alex Tomasc9de5602008-01-29 00:19:52 -05002762 sbi->s_mb_generation_time);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04002763 ext4_msg(sb, KERN_INFO,
2764 "mballoc: %u preallocated, %u discarded",
Alex Tomasc9de5602008-01-29 00:19:52 -05002765 atomic_read(&sbi->s_mb_preallocated),
2766 atomic_read(&sbi->s_mb_discarded));
2767 }
2768
Eric Sandeen730c2132008-09-13 15:23:29 -04002769 free_percpu(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05002770
2771 return 0;
2772}
2773
Lukas Czerner77ca6cd2010-10-27 21:30:11 -04002774static inline int ext4_issue_discard(struct super_block *sb,
Theodore Ts'o84130192011-09-09 18:50:51 -04002775 ext4_group_t block_group, ext4_grpblk_t cluster, int count)
Jiaying Zhang5c521832010-07-27 11:56:05 -04002776{
Jiaying Zhang5c521832010-07-27 11:56:05 -04002777 ext4_fsblk_t discard_block;
2778
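	/*
	 * Translate the group-relative cluster offset into an absolute
	 * filesystem block number and convert the count from clusters to
	 * blocks before handing the range to the block layer.
	 */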
Theodore Ts'o84130192011-09-09 18:50:51 -04002779 discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) +
2780 ext4_group_first_block_no(sb, block_group));
2781 count = EXT4_C2B(EXT4_SB(sb), count);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002782 trace_ext4_discard_blocks(sb,
2783 (unsigned long long) discard_block, count);
Lukas Czerner93259632011-01-10 12:09:59 -05002784 return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
Jiaying Zhang5c521832010-07-27 11:56:05 -04002785}
2786
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04002787/*
2788 * This function is called by the jbd2 layer once the commit has finished,
2789 * so we know we can free the blocks that were released with that commit.
2790 */
Bobi Jam18aadd42012-02-20 17:53:02 -05002791static void ext4_free_data_callback(struct super_block *sb,
2792 struct ext4_journal_cb_entry *jce,
2793 int rc)
Alex Tomasc9de5602008-01-29 00:19:52 -05002794{
Bobi Jam18aadd42012-02-20 17:53:02 -05002795 struct ext4_free_data *entry = (struct ext4_free_data *)jce;
Alex Tomasc9de5602008-01-29 00:19:52 -05002796 struct ext4_buddy e4b;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002797 struct ext4_group_info *db;
Theodore Ts'od9f34502011-04-30 13:47:24 -04002798 int err, count = 0, count2 = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05002799
Bobi Jam18aadd42012-02-20 17:53:02 -05002800 mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
2801 entry->efd_count, entry->efd_group, entry);
Alex Tomasc9de5602008-01-29 00:19:52 -05002802
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05002803 if (test_opt(sb, DISCARD)) {
2804 err = ext4_issue_discard(sb, entry->efd_group,
2805 entry->efd_start_cluster,
2806 entry->efd_count);
2807 if (err && err != -EOPNOTSUPP)
2808 ext4_msg(sb, KERN_WARNING, "discard request in"
2809 " group:%d block:%d count:%d failed"
2810 " with %d", entry->efd_group,
2811 entry->efd_start_cluster,
2812 entry->efd_count, err);
2813 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002814
Bobi Jam18aadd42012-02-20 17:53:02 -05002815 err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
2816 /* we expect to find existing buddy because it's pinned */
2817 BUG_ON(err != 0);
Theodore Ts'ob90f6872010-04-20 16:51:59 -04002818
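	/* the commit is done, so these clusters no longer count as pending frees */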
Theodore Ts'od08854f2016-06-26 18:24:01 -04002819 spin_lock(&EXT4_SB(sb)->s_md_lock);
2820 EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
2821 spin_unlock(&EXT4_SB(sb)->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05002822
Bobi Jam18aadd42012-02-20 17:53:02 -05002823 db = e4b.bd_info;
2824 /* there are blocks to put in buddy to make them really free */
2825 count += entry->efd_count;
2826 count2++;
2827 ext4_lock_group(sb, entry->efd_group);
2828 /* Take it out of per group rb tree */
2829 rb_erase(&entry->efd_node, &(db->bb_free_root));
2830 mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002831
Bobi Jam18aadd42012-02-20 17:53:02 -05002832 /*
2833 * Clear the trimmed flag for the group so that the next
2834 * ext4_trim_fs can trim it.
2835 * If the volume is mounted with -o discard, online discard
2836 * is supported and the free blocks will be trimmed online.
2837 */
2838 if (!test_opt(sb, DISCARD))
2839 EXT4_MB_GRP_CLEAR_TRIMMED(db);
2840
2841 if (!db->bb_free_root.rb_node) {
2842 /* No more items in the per group rb tree
2843 * balance refcounts from ext4_mb_free_metadata()
Tao Ma3d56b8d2011-07-11 00:03:38 -04002844 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002845 put_page(e4b.bd_buddy_page);
2846 put_page(e4b.bd_bitmap_page);
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04002847 }
Bobi Jam18aadd42012-02-20 17:53:02 -05002848 ext4_unlock_group(sb, entry->efd_group);
2849 kmem_cache_free(ext4_free_data_cachep, entry);
2850 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05002851
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04002852 mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
Alex Tomasc9de5602008-01-29 00:19:52 -05002853}
2854
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002855int __init ext4_init_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002856{
Theodore Ts'o16828082010-10-27 21:30:09 -04002857 ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
2858 SLAB_RECLAIM_ACCOUNT);
Alex Tomasc9de5602008-01-29 00:19:52 -05002859 if (ext4_pspace_cachep == NULL)
2860 return -ENOMEM;
2861
Theodore Ts'o16828082010-10-27 21:30:09 -04002862 ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
2863 SLAB_RECLAIM_ACCOUNT);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002864 if (ext4_ac_cachep == NULL) {
2865 kmem_cache_destroy(ext4_pspace_cachep);
2866 return -ENOMEM;
2867 }
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002868
Bobi Jam18aadd42012-02-20 17:53:02 -05002869 ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
2870 SLAB_RECLAIM_ACCOUNT);
2871 if (ext4_free_data_cachep == NULL) {
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04002872 kmem_cache_destroy(ext4_pspace_cachep);
2873 kmem_cache_destroy(ext4_ac_cachep);
2874 return -ENOMEM;
2875 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002876 return 0;
2877}
2878
Theodore Ts'o5dabfc72010-10-27 21:30:14 -04002879void ext4_exit_mballoc(void)
Alex Tomasc9de5602008-01-29 00:19:52 -05002880{
Theodore Ts'o60e66792010-05-17 07:00:00 -04002881 /*
Jesper Dangaard Brouer3e03f9c2009-07-05 22:29:27 -04002882 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
2883 * before destroying the slab cache.
2884 */
2885 rcu_barrier();
Alex Tomasc9de5602008-01-29 00:19:52 -05002886 kmem_cache_destroy(ext4_pspace_cachep);
Eric Sandeen256bdb42008-02-10 01:13:33 -05002887 kmem_cache_destroy(ext4_ac_cachep);
Bobi Jam18aadd42012-02-20 17:53:02 -05002888 kmem_cache_destroy(ext4_free_data_cachep);
Eric Sandeen2892c152011-02-12 08:12:18 -05002889 ext4_groupinfo_destroy_slabs();
Alex Tomasc9de5602008-01-29 00:19:52 -05002890}
2891
2892
2893/*
Uwe Kleine-König73b2c712010-07-30 21:02:47 +02002894 * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
Alex Tomasc9de5602008-01-29 00:19:52 -05002895 * Returns 0 on success or an error code
2896 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04002897static noinline_for_stack int
2898ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002899 handle_t *handle, unsigned int reserv_clstrs)
Alex Tomasc9de5602008-01-29 00:19:52 -05002900{
2901 struct buffer_head *bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002902 struct ext4_group_desc *gdp;
2903 struct buffer_head *gdp_bh;
2904 struct ext4_sb_info *sbi;
2905 struct super_block *sb;
2906 ext4_fsblk_t block;
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002907 int err, len;
Alex Tomasc9de5602008-01-29 00:19:52 -05002908
2909 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2910 BUG_ON(ac->ac_b_ex.fe_len <= 0);
2911
2912 sb = ac->ac_sb;
2913 sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05002914
Theodore Ts'o574ca172008-07-11 19:27:31 -04002915 bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04002916 if (IS_ERR(bitmap_bh)) {
2917 err = PTR_ERR(bitmap_bh);
2918 bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05002919 goto out_err;
Darrick J. Wong9008a582015-10-17 21:33:24 -04002920 }
Alex Tomasc9de5602008-01-29 00:19:52 -05002921
liang xie5d601252014-05-12 22:06:43 -04002922 BUFFER_TRACE(bitmap_bh, "getting write access");
Alex Tomasc9de5602008-01-29 00:19:52 -05002923 err = ext4_journal_get_write_access(handle, bitmap_bh);
2924 if (err)
2925 goto out_err;
2926
2927 err = -EIO;
2928 gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2929 if (!gdp)
2930 goto out_err;
2931
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05002932 ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002933 ext4_free_group_clusters(sb, gdp));
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04002934
liang xie5d601252014-05-12 22:06:43 -04002935 BUFFER_TRACE(gdp_bh, "get_write_access");
Alex Tomasc9de5602008-01-29 00:19:52 -05002936 err = ext4_journal_get_write_access(handle, gdp_bh);
2937 if (err)
2938 goto out_err;
2939
Akinobu Mitabda00de2010-03-03 23:53:25 -05002940 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
Alex Tomasc9de5602008-01-29 00:19:52 -05002941
Theodore Ts'o53accfa2011-09-09 18:48:51 -04002942 len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Theodore Ts'o6fd058f2009-05-17 15:38:01 -04002943 if (!ext4_data_block_valid(sbi, block, len)) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05002944 ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
Theodore Ts'o1084f252012-03-19 23:13:43 -04002945 "fs metadata", block, block+len);
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002946		/* The file system is mounted not to panic on error;
Vegard Nossum554a5cc2016-07-14 23:02:47 -04002947		 * fix the bitmap and return EFSCORRUPTED.
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002948		 * We leak some of the blocks here.
2949 */
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002950 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04002951 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2952 ac->ac_b_ex.fe_len);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002953 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Frank Mayhar03901312009-01-07 00:06:22 -05002954 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002955 if (!err)
Vegard Nossum554a5cc2016-07-14 23:02:47 -04002956 err = -EFSCORRUPTED;
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04002957 goto out_err;
Alex Tomasc9de5602008-01-29 00:19:52 -05002958 }
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002959
2960 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05002961#ifdef AGGRESSIVE_CHECK
2962 {
2963 int i;
2964 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2965 BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2966 bitmap_bh->b_data));
2967 }
2968 }
2969#endif
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04002970 ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2971 ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05002972 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2973 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002974 ext4_free_group_clusters_set(sb, gdp,
Theodore Ts'ocff1dfd72011-09-09 19:12:51 -04002975 ext4_free_clusters_after_init(sb,
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002976 ac->ac_b_ex.fe_group, gdp));
Alex Tomasc9de5602008-01-29 00:19:52 -05002977 }
Theodore Ts'o021b65b2011-09-09 19:08:51 -04002978 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2979 ext4_free_group_clusters_set(sb, gdp, len);
Tao Ma79f1ba42012-10-22 00:34:32 -04002980 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04002981 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04002982
2983 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
Theodore Ts'o57042652011-09-09 18:56:51 -04002984 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
Mingming Caod2a17632008-07-14 17:52:37 -04002985 /*
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04002986 * Now reduce the dirty block count also. Should not go negative
Mingming Caod2a17632008-07-14 17:52:37 -04002987 */
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04002988 if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2989 /* release all the reserved blocks if non delalloc */
Theodore Ts'o57042652011-09-09 18:56:51 -04002990 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
2991 reserv_clstrs);
Alex Tomasc9de5602008-01-29 00:19:52 -05002992
Jose R. Santos772cb7c2008-07-11 19:27:31 -04002993 if (sbi->s_log_groups_per_flex) {
2994 ext4_group_t flex_group = ext4_flex_group(sbi,
2995 ac->ac_b_ex.fe_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04002996 atomic64_sub(ac->ac_b_ex.fe_len,
2997 &sbi->s_flex_groups[flex_group].free_clusters);
Jose R. Santos772cb7c2008-07-11 19:27:31 -04002998 }
2999
Frank Mayhar03901312009-01-07 00:06:22 -05003000 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003001 if (err)
3002 goto out_err;
Frank Mayhar03901312009-01-07 00:06:22 -05003003 err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003004
3005out_err:
Aneesh Kumar K.V42a10ad2008-02-10 01:07:28 -05003006 brelse(bitmap_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05003007 return err;
3008}
3009
3010/*
3011 * here we normalize the request for a locality group.
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04003012 * Group requests are normalized to s_mb_group_prealloc, which is rounded
3013 * up to a multiple of s_stripe if a stripe size was set via mount option.
3014 * s_mb_group_prealloc can be configured via
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04003015 * /sys/fs/ext4/<partition>/mb_group_prealloc
Alex Tomasc9de5602008-01-29 00:19:52 -05003016 *
3017 * XXX: should we try to preallocate more than the group has now?
3018 */
3019static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
3020{
3021 struct super_block *sb = ac->ac_sb;
3022 struct ext4_locality_group *lg = ac->ac_lg;
3023
3024 BUG_ON(lg == NULL);
Dan Ehrenbergd7a1fee2011-07-17 21:11:30 -04003025 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003026 mb_debug(1, "#%u: goal %u blocks for locality group\n",
Alex Tomasc9de5602008-01-29 00:19:52 -05003027 current->pid, ac->ac_g_ex.fe_len);
3028}
3029
3030/*
3031 * Normalization means making request better in terms of
3032 * size and alignment
3033 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003034static noinline_for_stack void
3035ext4_mb_normalize_request(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05003036 struct ext4_allocation_request *ar)
3037{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003038 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003039 int bsbits, max;
3040 ext4_lblk_t end;
Curt Wohlgemuth1592d2c2012-02-20 17:53:03 -05003041 loff_t size, start_off;
3042 loff_t orig_size __maybe_unused;
Andi Kleen5a0790c2010-06-14 13:28:03 -04003043 ext4_lblk_t start;
Alex Tomasc9de5602008-01-29 00:19:52 -05003044 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003045 struct ext4_prealloc_space *pa;
Alex Tomasc9de5602008-01-29 00:19:52 -05003046
3047	/* only normalize data requests; metadata requests
3048	 do not need preallocation */
3049 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3050 return;
3051
3052	/* sometimes the caller may want exact blocks */
3053 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
3054 return;
3055
3056 /* caller may indicate that preallocation isn't
3057 * required (it's a tail, for example) */
3058 if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
3059 return;
3060
3061 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
3062 ext4_mb_normalize_group_request(ac);
3063 return ;
3064 }
3065
3066 bsbits = ac->ac_sb->s_blocksize_bits;
3067
3068	/* first, determine the actual file size
3069	 * assuming the current request is allocated */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003070 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003071 size = size << bsbits;
3072 if (size < i_size_read(ac->ac_inode))
3073 size = i_size_read(ac->ac_inode);
Andi Kleen5a0790c2010-06-14 13:28:03 -04003074 orig_size = size;
Alex Tomasc9de5602008-01-29 00:19:52 -05003075
Valerie Clement19304792008-05-13 19:31:14 -04003076 /* max size of free chunks */
3077 max = 2 << bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003078
Valerie Clement19304792008-05-13 19:31:14 -04003079#define NRL_CHECK_SIZE(req, size, max, chunk_size) \
3080 (req <= (size) || max <= (chunk_size))
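/*
 * NRL_CHECK_SIZE is true when the request is no larger than 'size', or
 * when the largest free chunk being considered ('max') is no larger than
 * 'chunk_size' -- presumably because there is little point in rounding up
 * to a target bigger than a single free chunk could cover.
 */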
Alex Tomasc9de5602008-01-29 00:19:52 -05003081
3082 /* first, try to predict filesize */
3083 /* XXX: should this table be tunable? */
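	/*
	 * The table below rounds the predicted size up to the next step;
	 * for the multi-megabyte cases, start_off is also computed so that
	 * the normalized request starts on a 2/4/8 MB boundary of the file.
	 */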
3084 start_off = 0;
3085 if (size <= 16 * 1024) {
3086 size = 16 * 1024;
3087 } else if (size <= 32 * 1024) {
3088 size = 32 * 1024;
3089 } else if (size <= 64 * 1024) {
3090 size = 64 * 1024;
3091 } else if (size <= 128 * 1024) {
3092 size = 128 * 1024;
3093 } else if (size <= 256 * 1024) {
3094 size = 256 * 1024;
3095 } else if (size <= 512 * 1024) {
3096 size = 512 * 1024;
3097 } else if (size <= 1024 * 1024) {
3098 size = 1024 * 1024;
Valerie Clement19304792008-05-13 19:31:14 -04003099 } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003100 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
Valerie Clement19304792008-05-13 19:31:14 -04003101 (21 - bsbits)) << 21;
3102 size = 2 * 1024 * 1024;
3103 } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003104 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3105 (22 - bsbits)) << 22;
3106 size = 4 * 1024 * 1024;
3107 } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
Valerie Clement19304792008-05-13 19:31:14 -04003108 (8<<20)>>bsbits, max, 8 * 1024)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003109 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3110 (23 - bsbits)) << 23;
3111 size = 8 * 1024 * 1024;
3112 } else {
Xiaoguang Wangb27b1532014-07-27 22:26:36 -04003113 start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
3114 size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
3115 ac->ac_o_ex.fe_len) << bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003116 }
Andi Kleen5a0790c2010-06-14 13:28:03 -04003117 size = size >> bsbits;
3118 start = start_off >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05003119
3120 /* don't cover already allocated blocks in selected range */
3121 if (ar->pleft && start <= ar->lleft) {
3122 size -= ar->lleft + 1 - start;
3123 start = ar->lleft + 1;
3124 }
3125 if (ar->pright && start + size - 1 >= ar->lright)
3126 size -= start + size - ar->lright;
3127
Jan Karafc6c2da2017-01-27 14:34:30 -05003128 /*
3129 * Trim allocation request for filesystems with artificially small
3130 * groups.
3131 */
3132 if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb))
3133 size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb);
3134
Alex Tomasc9de5602008-01-29 00:19:52 -05003135 end = start + size;
3136
3137 /* check we don't cross already preallocated blocks */
3138 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003139 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003140 ext4_lblk_t pa_end;
Alex Tomasc9de5602008-01-29 00:19:52 -05003141
Alex Tomasc9de5602008-01-29 00:19:52 -05003142 if (pa->pa_deleted)
3143 continue;
3144 spin_lock(&pa->pa_lock);
3145 if (pa->pa_deleted) {
3146 spin_unlock(&pa->pa_lock);
3147 continue;
3148 }
3149
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003150 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3151 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003152
3153 /* PA must not overlap original request */
3154 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3155 ac->ac_o_ex.fe_logical < pa->pa_lstart));
3156
Eric Sandeen38877f42009-08-17 23:55:24 -04003157 /* skip PAs this normalized request doesn't overlap with */
3158 if (pa->pa_lstart >= end || pa_end <= start) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003159 spin_unlock(&pa->pa_lock);
3160 continue;
3161 }
3162 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3163
Eric Sandeen38877f42009-08-17 23:55:24 -04003164 /* adjust start or end to be adjacent to this pa */
Alex Tomasc9de5602008-01-29 00:19:52 -05003165 if (pa_end <= ac->ac_o_ex.fe_logical) {
3166 BUG_ON(pa_end < start);
3167 start = pa_end;
Eric Sandeen38877f42009-08-17 23:55:24 -04003168 } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003169 BUG_ON(pa->pa_lstart > end);
3170 end = pa->pa_lstart;
3171 }
3172 spin_unlock(&pa->pa_lock);
3173 }
3174 rcu_read_unlock();
3175 size = end - start;
3176
3177 /* XXX: extra loop to check we really don't overlap preallocations */
3178 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003179 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003180 ext4_lblk_t pa_end;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003181
Alex Tomasc9de5602008-01-29 00:19:52 -05003182 spin_lock(&pa->pa_lock);
3183 if (pa->pa_deleted == 0) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003184 pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
3185 pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003186 BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3187 }
3188 spin_unlock(&pa->pa_lock);
3189 }
3190 rcu_read_unlock();
3191
3192 if (start + size <= ac->ac_o_ex.fe_logical &&
3193 start > ac->ac_o_ex.fe_logical) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003194 ext4_msg(ac->ac_sb, KERN_ERR,
3195 "start %lu, size %lu, fe_logical %lu",
3196 (unsigned long) start, (unsigned long) size,
3197 (unsigned long) ac->ac_o_ex.fe_logical);
Dmitry Monakhovdfe076c2014-10-01 22:26:17 -04003198 BUG();
Alex Tomasc9de5602008-01-29 00:19:52 -05003199 }
Maurizio Lombardib5b60772014-05-27 12:48:56 -04003200 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
Alex Tomasc9de5602008-01-29 00:19:52 -05003201
3202 /* now prepare goal request */
3203
3204	/* XXX: is it better to align blocks with respect to logical
3205	 * placement, or to satisfy a big request as is */
3206 ac->ac_g_ex.fe_logical = start;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003207 ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size);
Alex Tomasc9de5602008-01-29 00:19:52 -05003208
3209 /* define goal start in order to merge */
3210 if (ar->pright && (ar->lright == (start + size))) {
3211 /* merge to the right */
3212 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3213 &ac->ac_f_ex.fe_group,
3214 &ac->ac_f_ex.fe_start);
3215 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3216 }
3217 if (ar->pleft && (ar->lleft + 1 == start)) {
3218 /* merge to the left */
3219 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3220 &ac->ac_f_ex.fe_group,
3221 &ac->ac_f_ex.fe_start);
3222 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3223 }
3224
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003225 mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
Alex Tomasc9de5602008-01-29 00:19:52 -05003226 (unsigned) orig_size, (unsigned) start);
3227}
3228
3229static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3230{
3231 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3232
3233 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3234 atomic_inc(&sbi->s_bal_reqs);
3235 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
Curt Wohlgemuth291dae42010-05-16 16:00:00 -04003236 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
Alex Tomasc9de5602008-01-29 00:19:52 -05003237 atomic_inc(&sbi->s_bal_success);
3238 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3239 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3240 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3241 atomic_inc(&sbi->s_bal_goals);
3242 if (ac->ac_found > sbi->s_mb_max_to_scan)
3243 atomic_inc(&sbi->s_bal_breaks);
3244 }
3245
Theodore Ts'o296c3552009-09-30 00:32:42 -04003246 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
3247 trace_ext4_mballoc_alloc(ac);
3248 else
3249 trace_ext4_mballoc_prealloc(ac);
Alex Tomasc9de5602008-01-29 00:19:52 -05003250}
3251
3252/*
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003253 * Called on failure; free up any blocks from the inode PA for this
3254 * context. We don't need this for MB_GROUP_PA because we only change
3255 * pa_free in ext4_mb_release_context(), but on failure, we've already
3256 * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed.
3257 */
3258static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3259{
3260 struct ext4_prealloc_space *pa = ac->ac_pa;
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003261 struct ext4_buddy e4b;
3262 int err;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003263
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003264 if (pa == NULL) {
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04003265 if (ac->ac_f_ex.fe_len == 0)
3266 return;
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003267 err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
3268 if (err) {
3269 /*
3270 * This should never happen since we pin the
3271 * pages in the ext4_allocation_context so
3272 * ext4_mb_load_buddy() should never fail.
3273 */
3274 WARN(1, "mb_load_buddy failed (%d)", err);
3275 return;
3276 }
3277 ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
3278 mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
3279 ac->ac_f_ex.fe_len);
3280 ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
Theodore Ts'oc99d1e62014-08-23 17:47:28 -04003281 ext4_mb_unload_buddy(&e4b);
Theodore Ts'o86f0afd2014-07-30 22:17:17 -04003282 return;
3283 }
3284 if (pa->pa_type == MB_INODE_PA)
Zheng Liu400db9d2012-05-28 17:53:53 -04003285 pa->pa_free += ac->ac_b_ex.fe_len;
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05003286}
3287
3288/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003289 * use blocks preallocated to inode
3290 */
3291static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3292 struct ext4_prealloc_space *pa)
3293{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003294 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003295 ext4_fsblk_t start;
3296 ext4_fsblk_t end;
3297 int len;
3298
3299 /* found preallocated blocks, use them */
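	/* the physical start is the PA's start shifted by how far the
	 * original logical block lies inside the PA; the length is then
	 * clamped both to the requested length and to the end of the PA */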
3300 start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003301 end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len),
3302 start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len));
3303 len = EXT4_NUM_B2C(sbi, end - start);
Alex Tomasc9de5602008-01-29 00:19:52 -05003304 ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3305 &ac->ac_b_ex.fe_start);
3306 ac->ac_b_ex.fe_len = len;
3307 ac->ac_status = AC_STATUS_FOUND;
3308 ac->ac_pa = pa;
3309
3310 BUG_ON(start < pa->pa_pstart);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003311 BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
Alex Tomasc9de5602008-01-29 00:19:52 -05003312 BUG_ON(pa->pa_free < len);
3313 pa->pa_free -= len;
3314
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003315 mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003316}
3317
3318/*
3319 * use blocks preallocated to locality group
3320 */
3321static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3322 struct ext4_prealloc_space *pa)
3323{
Aneesh Kumar K.V03cddb82008-06-05 20:59:29 -04003324 unsigned int len = ac->ac_o_ex.fe_len;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003325
Alex Tomasc9de5602008-01-29 00:19:52 -05003326 ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3327 &ac->ac_b_ex.fe_group,
3328 &ac->ac_b_ex.fe_start);
3329 ac->ac_b_ex.fe_len = len;
3330 ac->ac_status = AC_STATUS_FOUND;
3331 ac->ac_pa = pa;
3332
3333	/* we don't correct pa_pstart or pa_len here to avoid a
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003334	 * possible race when the group is being loaded concurrently;
Alex Tomasc9de5602008-01-29 00:19:52 -05003335	 * instead we correct the pa later, after blocks are marked
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05003336	 * in the on-disk bitmap -- see ext4_mb_release_context().
3337	 * Other CPUs are prevented from allocating from this pa by lg_mutex
Alex Tomasc9de5602008-01-29 00:19:52 -05003338 */
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003339 mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003340}
3341
3342/*
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003343 * Return the prealloc space that has the minimal distance
3344 * from the goal block. @cpa is the prealloc
3345 * space with the currently known minimal distance
3346 * from the goal block.
3347 */
3348static struct ext4_prealloc_space *
3349ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3350 struct ext4_prealloc_space *pa,
3351 struct ext4_prealloc_space *cpa)
3352{
3353 ext4_fsblk_t cur_distance, new_distance;
3354
3355 if (cpa == NULL) {
3356 atomic_inc(&pa->pa_count);
3357 return pa;
3358 }
Andrew Morton79211c82015-11-09 14:58:13 -08003359 cur_distance = abs(goal_block - cpa->pa_pstart);
3360 new_distance = abs(goal_block - pa->pa_pstart);
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003361
Coly Li5a54b2f2011-02-24 14:10:05 -05003362 if (cur_distance <= new_distance)
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003363 return cpa;
3364
3365 /* drop the previous reference */
3366 atomic_dec(&cpa->pa_count);
3367 atomic_inc(&pa->pa_count);
3368 return pa;
3369}
3370
3371/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003372 * search goal blocks in preallocated space
3373 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003374static noinline_for_stack int
3375ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003376{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003377 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003378 int order, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05003379 struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3380 struct ext4_locality_group *lg;
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003381 struct ext4_prealloc_space *pa, *cpa = NULL;
3382 ext4_fsblk_t goal_block;
Alex Tomasc9de5602008-01-29 00:19:52 -05003383
3384 /* only data can be preallocated */
3385 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3386 return 0;
3387
3388 /* first, try per-file preallocation */
3389 rcu_read_lock();
Aneesh Kumar K.V9a0762c2008-04-17 10:38:59 -04003390 list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003391
3392		/* none of the fields in this condition change,
3393		 * so we can skip locking for them */
3394 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003395 ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
3396 EXT4_C2B(sbi, pa->pa_len)))
Alex Tomasc9de5602008-01-29 00:19:52 -05003397 continue;
3398
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003399 /* non-extent files can't have physical blocks past 2^32 */
Dmitry Monakhov12e9b892010-05-16 22:00:00 -04003400 if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003401 (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) >
3402 EXT4_MAX_BLOCK_FILE_PHYS))
Eric Sandeenfb0a3872009-09-16 14:45:10 -04003403 continue;
3404
Alex Tomasc9de5602008-01-29 00:19:52 -05003405 /* found preallocated blocks, use them */
3406 spin_lock(&pa->pa_lock);
3407 if (pa->pa_deleted == 0 && pa->pa_free) {
3408 atomic_inc(&pa->pa_count);
3409 ext4_mb_use_inode_pa(ac, pa);
3410 spin_unlock(&pa->pa_lock);
3411 ac->ac_criteria = 10;
3412 rcu_read_unlock();
3413 return 1;
3414 }
3415 spin_unlock(&pa->pa_lock);
3416 }
3417 rcu_read_unlock();
3418
3419 /* can we use group allocation? */
3420 if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3421 return 0;
3422
3423 /* inode may have no locality group for some reason */
3424 lg = ac->ac_lg;
3425 if (lg == NULL)
3426 return 0;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003427 order = fls(ac->ac_o_ex.fe_len) - 1;
3428 if (order > PREALLOC_TB_SIZE - 1)
3429 /* The max size of hash table is PREALLOC_TB_SIZE */
3430 order = PREALLOC_TB_SIZE - 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003431
Akinobu Mitabda00de2010-03-03 23:53:25 -05003432 goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003433 /*
3434	 * search for the prealloc space that has the
3435	 * minimal distance from the goal block.
3436 */
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003437 for (i = order; i < PREALLOC_TB_SIZE; i++) {
3438 rcu_read_lock();
3439 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3440 pa_inode_list) {
3441 spin_lock(&pa->pa_lock);
3442 if (pa->pa_deleted == 0 &&
3443 pa->pa_free >= ac->ac_o_ex.fe_len) {
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003444
3445 cpa = ext4_mb_check_group_pa(goal_block,
3446 pa, cpa);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003447 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003448 spin_unlock(&pa->pa_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05003449 }
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003450 rcu_read_unlock();
Alex Tomasc9de5602008-01-29 00:19:52 -05003451 }
Aneesh Kumar K.V5e745b02008-08-18 18:00:57 -04003452 if (cpa) {
3453 ext4_mb_use_group_pa(ac, cpa);
3454 ac->ac_criteria = 20;
3455 return 1;
3456 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003457 return 0;
3458}
3459
3460/*
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003461 * the function goes through all blocks freed in the group
3462 * but not yet committed and marks them used in the in-core bitmap.
3463 * the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003464 * Need to be called with the ext4 group lock held
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003465 */
3466static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
3467 ext4_group_t group)
3468{
3469 struct rb_node *n;
3470 struct ext4_group_info *grp;
3471 struct ext4_free_data *entry;
3472
3473 grp = ext4_get_group_info(sb, group);
3474 n = rb_first(&(grp->bb_free_root));
3475
3476 while (n) {
Bobi Jam18aadd42012-02-20 17:53:02 -05003477 entry = rb_entry(n, struct ext4_free_data, efd_node);
3478 ext4_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05003479 n = rb_next(n);
3480 }
3481 return;
3482}
3483
3484/*
Alex Tomasc9de5602008-01-29 00:19:52 -05003485 * the function goes through all preallocations in this group and marks them
3486 * used in the in-core bitmap. the buddy must be generated from this bitmap
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04003487 * Need to be called with ext4 group lock held
Alex Tomasc9de5602008-01-29 00:19:52 -05003488 */
Eric Sandeen089ceec2009-07-05 22:17:31 -04003489static noinline_for_stack
3490void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
Alex Tomasc9de5602008-01-29 00:19:52 -05003491 ext4_group_t group)
3492{
3493 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3494 struct ext4_prealloc_space *pa;
3495 struct list_head *cur;
3496 ext4_group_t groupnr;
3497 ext4_grpblk_t start;
3498 int preallocated = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003499 int len;
3500
3501	/* all forms of preallocation discard first load the group,
3502	 * so the only competing code is preallocation use.
3503	 * we don't need any locking here.
3504	 * note that we do NOT ignore preallocations with pa_deleted set;
3505	 * otherwise we could leave used blocks available for
3506	 * allocation in the buddy while a concurrent ext4_mb_put_pa()
3507	 * is dropping the preallocation
3508 */
3509 list_for_each(cur, &grp->bb_prealloc_list) {
3510 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3511 spin_lock(&pa->pa_lock);
3512 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3513 &groupnr, &start);
3514 len = pa->pa_len;
3515 spin_unlock(&pa->pa_lock);
3516 if (unlikely(len == 0))
3517 continue;
3518 BUG_ON(groupnr != group);
Yongqiang Yangc3e94d12011-07-26 22:05:53 -04003519 ext4_set_bits(bitmap, start, len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003520 preallocated += len;
Alex Tomasc9de5602008-01-29 00:19:52 -05003521 }
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003522	mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003523}
3524
3525static void ext4_mb_pa_callback(struct rcu_head *head)
3526{
3527 struct ext4_prealloc_space *pa;
3528 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
Junho Ryu4e8d2132013-12-03 18:10:28 -05003529
3530 BUG_ON(atomic_read(&pa->pa_count));
3531 BUG_ON(pa->pa_deleted == 0);
Alex Tomasc9de5602008-01-29 00:19:52 -05003532 kmem_cache_free(ext4_pspace_cachep, pa);
3533}
3534
3535/*
3536 * drops a reference to preallocated space descriptor
3537 * if this was the last reference and the space is consumed
3538 */
3539static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3540 struct super_block *sb, struct ext4_prealloc_space *pa)
3541{
Theodore Ts'oa9df9a42009-01-05 22:18:16 -05003542 ext4_group_t grp;
Eric Sandeend33a1972009-03-16 23:25:40 -04003543 ext4_fsblk_t grp_blk;
Alex Tomasc9de5602008-01-29 00:19:52 -05003544
Alex Tomasc9de5602008-01-29 00:19:52 -05003545 /* in this short window concurrent discard can set pa_deleted */
3546 spin_lock(&pa->pa_lock);
Junho Ryu4e8d2132013-12-03 18:10:28 -05003547 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3548 spin_unlock(&pa->pa_lock);
3549 return;
3550 }
3551
Alex Tomasc9de5602008-01-29 00:19:52 -05003552 if (pa->pa_deleted == 1) {
3553 spin_unlock(&pa->pa_lock);
3554 return;
3555 }
3556
3557 pa->pa_deleted = 1;
3558 spin_unlock(&pa->pa_lock);
3559
Eric Sandeend33a1972009-03-16 23:25:40 -04003560 grp_blk = pa->pa_pstart;
Theodore Ts'o60e66792010-05-17 07:00:00 -04003561 /*
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003562 * If doing group-based preallocation, pa_pstart may be in the
3563 * next group when pa is used up
3564 */
3565 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeend33a1972009-03-16 23:25:40 -04003566 grp_blk--;
3567
Lukas Czernerbd862982013-04-03 23:32:34 -04003568 grp = ext4_get_group_number(sb, grp_blk);
Alex Tomasc9de5602008-01-29 00:19:52 -05003569
3570 /*
3571 * possible race:
3572 *
3573 * P1 (buddy init) P2 (regular allocation)
3574 * find block B in PA
3575 * copy on-disk bitmap to buddy
3576 * mark B in on-disk bitmap
3577 * drop PA from group
3578 * mark all PAs in buddy
3579 *
3580 * thus, P1 initializes buddy with B available. to prevent this
3581 * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3582 * against that pair
3583 */
3584 ext4_lock_group(sb, grp);
3585 list_del(&pa->pa_group_list);
3586 ext4_unlock_group(sb, grp);
3587
3588 spin_lock(pa->pa_obj_lock);
3589 list_del_rcu(&pa->pa_inode_list);
3590 spin_unlock(pa->pa_obj_lock);
3591
3592 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3593}
3594
3595/*
3596 * creates new preallocated space for given inode
3597 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003598static noinline_for_stack int
3599ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003600{
3601 struct super_block *sb = ac->ac_sb;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003602 struct ext4_sb_info *sbi = EXT4_SB(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05003603 struct ext4_prealloc_space *pa;
3604 struct ext4_group_info *grp;
3605 struct ext4_inode_info *ei;
3606
3607	/* preallocate only when the found space is larger than requested */
3608 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3609 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3610 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3611
3612 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3613 if (pa == NULL)
3614 return -ENOMEM;
3615
3616 if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3617 int winl;
3618 int wins;
3619 int win;
3620 int offs;
3621
3622		/* we can't allocate as much as the normalizer wants,
3623		 * so the found space must get a proper lstart
3624		 * to cover the original request */
3625 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3626 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3627
3628		/* we're limited by the original request in that its
3629		 * logical block must be covered anyway;
3630		 * winl is the window we can move our chunk within */
3631 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3632
3633 /* also, we should cover whole original request */
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003634 wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003635
3636 /* the smallest one defines real window */
3637 win = min(winl, wins);
3638
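		/* if a multiple of the allocated length falls inside the
		 * window, prefer it so that the preallocation's logical
		 * start ends up aligned to the allocation size */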
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003639 offs = ac->ac_o_ex.fe_logical %
3640 EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003641 if (offs && offs < win)
3642 win = offs;
3643
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003644 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
Lukas Czerner810da242013-03-02 17:18:58 -05003645 EXT4_NUM_B2C(sbi, win);
Alex Tomasc9de5602008-01-29 00:19:52 -05003646 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3647 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3648 }
3649
3650 /* preallocation can change ac_b_ex, thus we store actually
3651 * allocated blocks for history */
3652 ac->ac_f_ex = ac->ac_b_ex;
3653
3654 pa->pa_lstart = ac->ac_b_ex.fe_logical;
3655 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3656 pa->pa_len = ac->ac_b_ex.fe_len;
3657 pa->pa_free = pa->pa_len;
3658 atomic_set(&pa->pa_count, 1);
3659 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003660 INIT_LIST_HEAD(&pa->pa_inode_list);
3661 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003662 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003663 pa->pa_type = MB_INODE_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003664
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003665 mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
Alex Tomasc9de5602008-01-29 00:19:52 -05003666 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003667 trace_ext4_mb_new_inode_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003668
3669 ext4_mb_use_inode_pa(ac, pa);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003670 atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
Alex Tomasc9de5602008-01-29 00:19:52 -05003671
3672 ei = EXT4_I(ac->ac_inode);
3673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3674
3675 pa->pa_obj_lock = &ei->i_prealloc_lock;
3676 pa->pa_inode = ac->ac_inode;
3677
3678 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3679 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3680 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3681
3682 spin_lock(pa->pa_obj_lock);
3683 list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3684 spin_unlock(pa->pa_obj_lock);
3685
3686 return 0;
3687}
3688
3689/*
3690 * creates new preallocated space for the locality group the inode belongs to
3691 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003692static noinline_for_stack int
3693ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
Alex Tomasc9de5602008-01-29 00:19:52 -05003694{
3695 struct super_block *sb = ac->ac_sb;
3696 struct ext4_locality_group *lg;
3697 struct ext4_prealloc_space *pa;
3698 struct ext4_group_info *grp;
3699
3700	/* preallocate only when the found space is larger than requested */
3701 BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3702 BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3703 BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3704
3705 BUG_ON(ext4_pspace_cachep == NULL);
3706 pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3707 if (pa == NULL)
3708 return -ENOMEM;
3709
3710 /* preallocation can change ac_b_ex, thus we store actually
3711 * allocated blocks for history */
3712 ac->ac_f_ex = ac->ac_b_ex;
3713
3714 pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3715 pa->pa_lstart = pa->pa_pstart;
3716 pa->pa_len = ac->ac_b_ex.fe_len;
3717 pa->pa_free = pa->pa_len;
3718 atomic_set(&pa->pa_count, 1);
3719 spin_lock_init(&pa->pa_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003720 INIT_LIST_HEAD(&pa->pa_inode_list);
Aneesh Kumar K.Vd794bf82009-02-14 10:31:16 -05003721 INIT_LIST_HEAD(&pa->pa_group_list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003722 pa->pa_deleted = 0;
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003723 pa->pa_type = MB_GROUP_PA;
Alex Tomasc9de5602008-01-29 00:19:52 -05003724
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003725 mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003726 pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3727 trace_ext4_mb_new_group_pa(ac, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003728
3729 ext4_mb_use_group_pa(ac, pa);
3730 atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3731
3732 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3733 lg = ac->ac_lg;
3734 BUG_ON(lg == NULL);
3735
3736 pa->pa_obj_lock = &lg->lg_prealloc_lock;
3737 pa->pa_inode = NULL;
3738
3739 ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3740 list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3741 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3742
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04003743 /*
3744 * We will later add the new pa to the right bucket
3745 * after updating the pa_free in ext4_mb_release_context
3746 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003747 return 0;
3748}
3749
3750static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3751{
3752 int err;
3753
3754 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3755 err = ext4_mb_new_group_pa(ac);
3756 else
3757 err = ext4_mb_new_inode_pa(ac);
3758 return err;
3759}
3760
3761/*
3762 * finds all unused blocks in the on-disk bitmap, frees them in
3763 * the in-core bitmap and buddy.
3764 * @pa must be unlinked from inode and group lists, so that
3765 * nobody else can find/use it.
3766 * the caller MUST hold group/inode locks.
3767 * TODO: optimize the case when there are no in-core structures yet
3768 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003769static noinline_for_stack int
3770ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003771 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003772{
Alex Tomasc9de5602008-01-29 00:19:52 -05003773 struct super_block *sb = e4b->bd_sb;
3774 struct ext4_sb_info *sbi = EXT4_SB(sb);
Theodore Ts'o498e5f22008-11-05 00:14:04 -05003775 unsigned int end;
3776 unsigned int next;
Alex Tomasc9de5602008-01-29 00:19:52 -05003777 ext4_group_t group;
3778 ext4_grpblk_t bit;
Theodore Ts'oba80b102009-01-03 20:03:21 -05003779 unsigned long long grp_blk_start;
Alex Tomasc9de5602008-01-29 00:19:52 -05003780 int err = 0;
3781 int free = 0;
3782
3783 BUG_ON(pa->pa_deleted == 0);
3784 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003785 grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003786 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3787 end = bit + pa->pa_len;
3788
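	/* walk the on-disk bitmap across the PA's range: each run of clear
	 * bits is space the PA reserved but never handed out, so give it
	 * back to the buddy */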
Alex Tomasc9de5602008-01-29 00:19:52 -05003789 while (bit < end) {
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003790 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003791 if (bit >= end)
3792 break;
Aneesh Kumar K.Vffad0a42008-02-23 01:38:34 -05003793 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003794 mb_debug(1, " free preallocated %u/%u in group %u\n",
Andi Kleen5a0790c2010-06-14 13:28:03 -04003795 (unsigned) ext4_group_first_block_no(sb, group) + bit,
3796 (unsigned) next - bit, (unsigned) group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003797 free += next - bit;
3798
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003799 trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04003800 trace_ext4_mb_release_inode_pa(pa, (grp_blk_start +
3801 EXT4_C2B(sbi, bit)),
Lukas Czernera9c667f2011-06-06 09:51:52 -04003802 next - bit);
Alex Tomasc9de5602008-01-29 00:19:52 -05003803 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3804 bit = next + 1;
3805 }
3806 if (free != pa->pa_free) {
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003807 ext4_msg(e4b->bd_sb, KERN_CRIT,
3808		"pa %p: logical %lu, phys. %lu, len %lu",
3809 pa, (unsigned long) pa->pa_lstart,
3810 (unsigned long) pa->pa_pstart,
3811 (unsigned long) pa->pa_len);
Theodore Ts'oe29136f2010-06-29 12:54:28 -04003812 ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
Aneesh Kumar K.V5d1b1b32009-01-05 22:19:52 -05003813 free, pa->pa_free);
Aneesh Kumar K.Ve56eb652008-02-15 13:48:21 -05003814 /*
3815 * pa is already deleted so we use the value obtained
3816 * from the bitmap and continue.
3817 */
Alex Tomasc9de5602008-01-29 00:19:52 -05003818 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003819 atomic_add(free, &sbi->s_mb_discarded);
3820
3821 return err;
3822}
3823
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003824static noinline_for_stack int
3825ext4_mb_release_group_pa(struct ext4_buddy *e4b,
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003826 struct ext4_prealloc_space *pa)
Alex Tomasc9de5602008-01-29 00:19:52 -05003827{
Alex Tomasc9de5602008-01-29 00:19:52 -05003828 struct super_block *sb = e4b->bd_sb;
3829 ext4_group_t group;
3830 ext4_grpblk_t bit;
3831
Yongqiang Yang60e07cf2011-12-18 15:49:54 -05003832 trace_ext4_mb_release_group_pa(sb, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003833 BUG_ON(pa->pa_deleted == 0);
3834 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3835 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3836 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3837 atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003838 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05003839
3840 return 0;
3841}
3842
3843/*
3844 * releases all preallocations in given group
3845 *
3846 * first, we need to decide discard policy:
3847 * - when do we discard
3848 * 1) ENOSPC
3849 * - how many do we discard
3850 * 1) how many requested
3851 */
Eric Sandeen4ddfef72008-04-29 08:11:12 -04003852static noinline_for_stack int
3853ext4_mb_discard_group_preallocations(struct super_block *sb,
Alex Tomasc9de5602008-01-29 00:19:52 -05003854 ext4_group_t group, int needed)
3855{
3856 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3857 struct buffer_head *bitmap_bh = NULL;
3858 struct ext4_prealloc_space *pa, *tmp;
3859 struct list_head list;
3860 struct ext4_buddy e4b;
3861 int err;
3862 int busy = 0;
3863 int free = 0;
3864
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003865 mb_debug(1, "discard preallocation for group %u\n", group);
Alex Tomasc9de5602008-01-29 00:19:52 -05003866
3867 if (list_empty(&grp->bb_prealloc_list))
3868 return 0;
3869
Theodore Ts'o574ca172008-07-11 19:27:31 -04003870 bitmap_bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04003871 if (IS_ERR(bitmap_bh)) {
3872 err = PTR_ERR(bitmap_bh);
3873 ext4_error(sb, "Error %d reading block bitmap for %u",
3874 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003875 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05003876 }
3877
3878 err = ext4_mb_load_buddy(sb, group, &e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003879 if (err) {
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04003880 ext4_warning(sb, "Error %d loading buddy information for %u",
3881 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04003882 put_bh(bitmap_bh);
3883 return 0;
3884 }
Alex Tomasc9de5602008-01-29 00:19:52 -05003885
3886 if (needed == 0)
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04003887 needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1;
Alex Tomasc9de5602008-01-29 00:19:52 -05003888
Alex Tomasc9de5602008-01-29 00:19:52 -05003889 INIT_LIST_HEAD(&list);
Alex Tomasc9de5602008-01-29 00:19:52 -05003890repeat:
3891 ext4_lock_group(sb, group);
3892 list_for_each_entry_safe(pa, tmp,
3893 &grp->bb_prealloc_list, pa_group_list) {
3894 spin_lock(&pa->pa_lock);
3895 if (atomic_read(&pa->pa_count)) {
3896 spin_unlock(&pa->pa_lock);
3897 busy = 1;
3898 continue;
3899 }
3900 if (pa->pa_deleted) {
3901 spin_unlock(&pa->pa_lock);
3902 continue;
3903 }
3904
3905 /* seems this one can be freed ... */
3906 pa->pa_deleted = 1;
3907
3908 /* we can trust pa_free ... */
3909 free += pa->pa_free;
3910
3911 spin_unlock(&pa->pa_lock);
3912
3913 list_del(&pa->pa_group_list);
3914 list_add(&pa->u.pa_tmp_list, &list);
3915 }
3916
3917 /* if we still need more blocks and some PAs were used, try again */
3918 if (free < needed && busy) {
3919 busy = 0;
3920 ext4_unlock_group(sb, group);
Lukas Czernerbb8b20e2013-03-10 22:28:09 -04003921 cond_resched();
Alex Tomasc9de5602008-01-29 00:19:52 -05003922 goto repeat;
3923 }
3924
3925 /* found anything to free? */
3926 if (list_empty(&list)) {
3927 BUG_ON(free != 0);
3928 goto out;
3929 }
3930
3931 /* now free all selected PAs */
3932 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3933
3934 /* remove from object (inode or locality group) */
3935 spin_lock(pa->pa_obj_lock);
3936 list_del_rcu(&pa->pa_inode_list);
3937 spin_unlock(pa->pa_obj_lock);
3938
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04003939 if (pa->pa_type == MB_GROUP_PA)
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003940 ext4_mb_release_group_pa(&e4b, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003941 else
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04003942 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05003943
3944 list_del(&pa->u.pa_tmp_list);
3945 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3946 }
3947
3948out:
3949 ext4_unlock_group(sb, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04003950 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05003951 put_bh(bitmap_bh);
3952 return free;
3953}
3954
3955/*
 3956 * releases all unused preallocated blocks for the given inode
 3957 *
 3958 * It's important to discard preallocations under i_data_sem.
 3959 * We don't want another block to be served from the prealloc
 3960 * space while we are discarding the inode prealloc space.
3961 *
3962 * FIXME!! Make sure it is valid at all the call sites
3963 */
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04003964void ext4_discard_preallocations(struct inode *inode)
Alex Tomasc9de5602008-01-29 00:19:52 -05003965{
3966 struct ext4_inode_info *ei = EXT4_I(inode);
3967 struct super_block *sb = inode->i_sb;
3968 struct buffer_head *bitmap_bh = NULL;
3969 struct ext4_prealloc_space *pa, *tmp;
3970 ext4_group_t group = 0;
3971 struct list_head list;
3972 struct ext4_buddy e4b;
3973 int err;
3974
Theodore Ts'oc2ea3fd2008-10-10 09:40:52 -04003975 if (!S_ISREG(inode->i_mode)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05003976 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3977 return;
3978 }
3979
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04003980 mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
Theodore Ts'o9bffad12009-06-17 11:48:11 -04003981 trace_ext4_discard_preallocations(inode);
Alex Tomasc9de5602008-01-29 00:19:52 -05003982
3983 INIT_LIST_HEAD(&list);
3984
3985repeat:
3986 /* first, collect all pa's in the inode */
3987 spin_lock(&ei->i_prealloc_lock);
3988 while (!list_empty(&ei->i_prealloc_list)) {
3989 pa = list_entry(ei->i_prealloc_list.next,
3990 struct ext4_prealloc_space, pa_inode_list);
3991 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3992 spin_lock(&pa->pa_lock);
3993 if (atomic_read(&pa->pa_count)) {
3994 /* this shouldn't happen often - nobody should
3995 * use preallocation while we're discarding it */
3996 spin_unlock(&pa->pa_lock);
3997 spin_unlock(&ei->i_prealloc_lock);
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04003998 ext4_msg(sb, KERN_ERR,
3999 "uh-oh! used pa while discarding");
Alex Tomasc9de5602008-01-29 00:19:52 -05004000 WARN_ON(1);
4001 schedule_timeout_uninterruptible(HZ);
4002 goto repeat;
4003
4004 }
4005 if (pa->pa_deleted == 0) {
4006 pa->pa_deleted = 1;
4007 spin_unlock(&pa->pa_lock);
4008 list_del_rcu(&pa->pa_inode_list);
4009 list_add(&pa->u.pa_tmp_list, &list);
4010 continue;
4011 }
4012
4013 /* someone is deleting pa right now */
4014 spin_unlock(&pa->pa_lock);
4015 spin_unlock(&ei->i_prealloc_lock);
4016
 4017 /* we have to wait here because pa_deleted
 4018 * doesn't mean pa is already unlinked from
 4019 * the list. As we might be called from
 4020 * ->clear_inode(), the inode will get freed
 4021 * and a concurrent thread which is unlinking
 4022 * pa from the inode's list may access already
4023 * freed memory, bad-bad-bad */
4024
4025 /* XXX: if this happens too often, we can
4026 * add a flag to force wait only in case
4027 * of ->clear_inode(), but not in case of
4028 * regular truncate */
4029 schedule_timeout_uninterruptible(HZ);
4030 goto repeat;
4031 }
4032 spin_unlock(&ei->i_prealloc_lock);
4033
4034 list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004035 BUG_ON(pa->pa_type != MB_INODE_PA);
Lukas Czernerbd862982013-04-03 23:32:34 -04004036 group = ext4_get_group_number(sb, pa->pa_pstart);
Alex Tomasc9de5602008-01-29 00:19:52 -05004037
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04004038 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4039 GFP_NOFS|__GFP_NOFAIL);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004040 if (err) {
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04004041 ext4_error(sb, "Error %d loading buddy information for %u",
4042 err, group);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004043 continue;
4044 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004045
Theodore Ts'o574ca172008-07-11 19:27:31 -04004046 bitmap_bh = ext4_read_block_bitmap(sb, group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04004047 if (IS_ERR(bitmap_bh)) {
4048 err = PTR_ERR(bitmap_bh);
4049 ext4_error(sb, "Error %d reading block bitmap for %u",
4050 err, group);
Jing Zhange39e07f2010-05-14 00:00:00 -04004051 ext4_mb_unload_buddy(&e4b);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004052 continue;
Alex Tomasc9de5602008-01-29 00:19:52 -05004053 }
4054
4055 ext4_lock_group(sb, group);
4056 list_del(&pa->pa_group_list);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04004057 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
Alex Tomasc9de5602008-01-29 00:19:52 -05004058 ext4_unlock_group(sb, group);
4059
Jing Zhange39e07f2010-05-14 00:00:00 -04004060 ext4_mb_unload_buddy(&e4b);
Alex Tomasc9de5602008-01-29 00:19:52 -05004061 put_bh(bitmap_bh);
4062
4063 list_del(&pa->u.pa_tmp_list);
4064 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4065 }
4066}
4067
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004068#ifdef CONFIG_EXT4_DEBUG
Alex Tomasc9de5602008-01-29 00:19:52 -05004069static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4070{
4071 struct super_block *sb = ac->ac_sb;
Theodore Ts'o8df96752009-05-01 08:50:38 -04004072 ext4_group_t ngroups, i;
Alex Tomasc9de5602008-01-29 00:19:52 -05004073
Theodore Ts'oa0b30c12013-02-09 16:28:20 -05004074 if (!ext4_mballoc_debug ||
Theodore Ts'o4dd89fc2011-02-27 17:23:47 -05004075 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
Eric Sandeene3570632010-07-27 11:56:08 -04004076 return;
4077
Joe Perches7f6a11e2012-03-19 23:09:43 -04004078 ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04004079 " Allocation context details:");
Joe Perches7f6a11e2012-03-19 23:09:43 -04004080 ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05004081 ac->ac_status, ac->ac_flags);
Joe Perches7f6a11e2012-03-19 23:09:43 -04004082 ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
Theodore Ts'o9d8b9ec2011-08-01 17:41:35 -04004083 "goal %lu/%lu/%lu@%lu, "
4084 "best %lu/%lu/%lu@%lu cr %d",
Alex Tomasc9de5602008-01-29 00:19:52 -05004085 (unsigned long)ac->ac_o_ex.fe_group,
4086 (unsigned long)ac->ac_o_ex.fe_start,
4087 (unsigned long)ac->ac_o_ex.fe_len,
4088 (unsigned long)ac->ac_o_ex.fe_logical,
4089 (unsigned long)ac->ac_g_ex.fe_group,
4090 (unsigned long)ac->ac_g_ex.fe_start,
4091 (unsigned long)ac->ac_g_ex.fe_len,
4092 (unsigned long)ac->ac_g_ex.fe_logical,
4093 (unsigned long)ac->ac_b_ex.fe_group,
4094 (unsigned long)ac->ac_b_ex.fe_start,
4095 (unsigned long)ac->ac_b_ex.fe_len,
4096 (unsigned long)ac->ac_b_ex.fe_logical,
4097 (int)ac->ac_criteria);
Eric Sandeendc9ddd92014-02-20 13:32:10 -05004098 ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
Joe Perches7f6a11e2012-03-19 23:09:43 -04004099 ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
Theodore Ts'o8df96752009-05-01 08:50:38 -04004100 ngroups = ext4_get_groups_count(sb);
4101 for (i = 0; i < ngroups; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004102 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
4103 struct ext4_prealloc_space *pa;
4104 ext4_grpblk_t start;
4105 struct list_head *cur;
4106 ext4_lock_group(sb, i);
4107 list_for_each(cur, &grp->bb_prealloc_list) {
4108 pa = list_entry(cur, struct ext4_prealloc_space,
4109 pa_group_list);
4110 spin_lock(&pa->pa_lock);
4111 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
4112 NULL, &start);
4113 spin_unlock(&pa->pa_lock);
Akira Fujita1c718502009-07-05 23:04:36 -04004114 printk(KERN_ERR "PA:%u:%d:%u \n", i,
4115 start, pa->pa_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05004116 }
Solofo Ramangalahy60bd63d2008-04-29 21:59:59 -04004117 ext4_unlock_group(sb, i);
Alex Tomasc9de5602008-01-29 00:19:52 -05004118
4119 if (grp->bb_free == 0)
4120 continue;
Akira Fujita1c718502009-07-05 23:04:36 -04004121 printk(KERN_ERR "%u: %d/%d \n",
Alex Tomasc9de5602008-01-29 00:19:52 -05004122 i, grp->bb_free, grp->bb_fragments);
4123 }
4124 printk(KERN_ERR "\n");
4125}
4126#else
4127static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
4128{
4129 return;
4130}
4131#endif
4132
4133/*
 4134 * We use locality group preallocation for small files. The size of the
 4135 * file is determined by the current size or the resulting size after
 4136 * allocation, whichever is larger.
4137 *
Theodore Ts'ob713a5e2009-03-31 09:11:14 -04004138 * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req
Alex Tomasc9de5602008-01-29 00:19:52 -05004139 */
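/*
 * Rough example (a sketch, assuming a 4 KiB block size): with the default
 * mb_stream_request of 16 blocks, a file whose size (current or resulting,
 * whichever is larger) is at most 64 KiB is served from the per-CPU
 * locality group, while anything larger falls back to stream (per-inode)
 * allocation.
 */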
4140static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
4141{
4142 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4143 int bsbits = ac->ac_sb->s_blocksize_bits;
4144 loff_t size, isize;
4145
4146 if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4147 return;
4148
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004149 if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4150 return;
4151
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004152 size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
Theodore Ts'o50797482009-09-18 13:34:02 -04004153 isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
4154 >> bsbits;
Alex Tomasc9de5602008-01-29 00:19:52 -05004155
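	/*
	 * If the allocation does not grow the file, the filesystem is not
	 * busy and nobody has the file open for writing, skip preallocation
	 * entirely: the extra reserved blocks would likely never be used.
	 */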
Theodore Ts'o50797482009-09-18 13:34:02 -04004156 if ((size == isize) &&
4157 !ext4_fs_is_busy(sbi) &&
4158 (atomic_read(&ac->ac_inode->i_writecount) == 0)) {
4159 ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC;
4160 return;
4161 }
4162
Robin Dongebbe0272011-10-26 05:14:27 -04004163 if (sbi->s_mb_group_prealloc <= 0) {
4164 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
4165 return;
4166 }
4167
Alex Tomasc9de5602008-01-29 00:19:52 -05004168 /* don't use group allocation for large files */
Theodore Ts'o71780572009-09-28 00:06:20 -04004169 size = max(size, isize);
Tao Macc483f12010-03-01 19:06:35 -05004170 if (size > sbi->s_mb_stream_request) {
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004171 ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
Alex Tomasc9de5602008-01-29 00:19:52 -05004172 return;
Theodore Ts'o4ba74d02009-08-09 22:01:13 -04004173 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004174
4175 BUG_ON(ac->ac_lg != NULL);
4176 /*
 4177 * locality group prealloc space is per cpu. The reason for having
 4178 * a per-cpu locality group is to reduce the contention between block
 4179 * requests from multiple CPUs.
4180 */
Christoph Lametera0b6bc62014-08-17 12:30:28 -05004181 ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
Alex Tomasc9de5602008-01-29 00:19:52 -05004182
4183 /* we're going to use group allocation */
4184 ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4185
4186 /* serialize all allocations in the group */
4187 mutex_lock(&ac->ac_lg->lg_mutex);
4188}
4189
Eric Sandeen4ddfef72008-04-29 08:11:12 -04004190static noinline_for_stack int
4191ext4_mb_initialize_context(struct ext4_allocation_context *ac,
Alex Tomasc9de5602008-01-29 00:19:52 -05004192 struct ext4_allocation_request *ar)
4193{
4194 struct super_block *sb = ar->inode->i_sb;
4195 struct ext4_sb_info *sbi = EXT4_SB(sb);
4196 struct ext4_super_block *es = sbi->s_es;
4197 ext4_group_t group;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05004198 unsigned int len;
4199 ext4_fsblk_t goal;
Alex Tomasc9de5602008-01-29 00:19:52 -05004200 ext4_grpblk_t block;
4201
4202 /* we can't allocate > group size */
4203 len = ar->len;
4204
 4205 /* just a dirty hack to filter out overly large requests */
Theodore Ts'o40ae3482013-02-04 15:08:40 -05004206 if (len >= EXT4_CLUSTERS_PER_GROUP(sb))
4207 len = EXT4_CLUSTERS_PER_GROUP(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004208
4209 /* start searching from the goal */
4210 goal = ar->goal;
4211 if (goal < le32_to_cpu(es->s_first_data_block) ||
4212 goal >= ext4_blocks_count(es))
4213 goal = le32_to_cpu(es->s_first_data_block);
4214 ext4_get_group_no_and_offset(sb, goal, &group, &block);
4215
4216 /* set up allocation goals */
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004217 ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
Alex Tomasc9de5602008-01-29 00:19:52 -05004218 ac->ac_status = AC_STATUS_CONTINUE;
Alex Tomasc9de5602008-01-29 00:19:52 -05004219 ac->ac_sb = sb;
4220 ac->ac_inode = ar->inode;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004221 ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical;
Alex Tomasc9de5602008-01-29 00:19:52 -05004222 ac->ac_o_ex.fe_group = group;
4223 ac->ac_o_ex.fe_start = block;
4224 ac->ac_o_ex.fe_len = len;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004225 ac->ac_g_ex = ac->ac_o_ex;
Alex Tomasc9de5602008-01-29 00:19:52 -05004226 ac->ac_flags = ar->flags;
Alex Tomasc9de5602008-01-29 00:19:52 -05004227
 4228 /* we have to define context: will we work with a file or a
 4229 * locality group. This is a policy, actually */
4230 ext4_mb_group_or_file(ac);
4231
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004232 mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
Alex Tomasc9de5602008-01-29 00:19:52 -05004233 "left: %u/%u, right %u/%u to %swritable\n",
4234 (unsigned) ar->len, (unsigned) ar->logical,
4235 (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4236 (unsigned) ar->lleft, (unsigned) ar->pleft,
4237 (unsigned) ar->lright, (unsigned) ar->pright,
4238 atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4239 return 0;
4240
4241}
4242
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004243static noinline_for_stack void
4244ext4_mb_discard_lg_preallocations(struct super_block *sb,
4245 struct ext4_locality_group *lg,
4246 int order, int total_entries)
4247{
4248 ext4_group_t group = 0;
4249 struct ext4_buddy e4b;
4250 struct list_head discard_list;
4251 struct ext4_prealloc_space *pa, *tmp;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004252
Theodore Ts'o6ba495e2009-09-18 13:38:55 -04004253 mb_debug(1, "discard locality group preallocation\n");
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004254
4255 INIT_LIST_HEAD(&discard_list);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004256
4257 spin_lock(&lg->lg_prealloc_lock);
4258 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4259 pa_inode_list) {
4260 spin_lock(&pa->pa_lock);
4261 if (atomic_read(&pa->pa_count)) {
4262 /*
4263 * This is the pa that we just used
4264 * for block allocation. So don't
4265 * free that
4266 */
4267 spin_unlock(&pa->pa_lock);
4268 continue;
4269 }
4270 if (pa->pa_deleted) {
4271 spin_unlock(&pa->pa_lock);
4272 continue;
4273 }
4274 /* only lg prealloc space */
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004275 BUG_ON(pa->pa_type != MB_GROUP_PA);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004276
4277 /* seems this one can be freed ... */
4278 pa->pa_deleted = 1;
4279 spin_unlock(&pa->pa_lock);
4280
4281 list_del_rcu(&pa->pa_inode_list);
4282 list_add(&pa->u.pa_tmp_list, &discard_list);
4283
4284 total_entries--;
4285 if (total_entries <= 5) {
4286 /*
 4287 * we want to keep only 5 entries
 4288 * allowing it to grow to 8. This
 4289 * makes sure we don't call discard
 4290 * again soon for this list.
4291 */
4292 break;
4293 }
4294 }
4295 spin_unlock(&lg->lg_prealloc_lock);
4296
4297 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04004298 int err;
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004299
Lukas Czernerbd862982013-04-03 23:32:34 -04004300 group = ext4_get_group_number(sb, pa->pa_pstart);
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04004301 err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
4302 GFP_NOFS|__GFP_NOFAIL);
4303 if (err) {
4304 ext4_error(sb, "Error %d loading buddy information for %u",
4305 err, group);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004306 continue;
4307 }
4308 ext4_lock_group(sb, group);
4309 list_del(&pa->pa_group_list);
Eric Sandeen3e1e5f52010-10-27 21:30:07 -04004310 ext4_mb_release_group_pa(&e4b, pa);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004311 ext4_unlock_group(sb, group);
4312
Jing Zhange39e07f2010-05-14 00:00:00 -04004313 ext4_mb_unload_buddy(&e4b);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004314 list_del(&pa->u.pa_tmp_list);
4315 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4316 }
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004317}
4318
4319/*
4320 * We have incremented pa_count. So it cannot be freed at this
4321 * point. Also we hold lg_mutex. So no parallel allocation is
4322 * possible from this lg. That means pa_free cannot be updated.
4323 *
4324 * A parallel ext4_mb_discard_group_preallocations is possible.
4325 * which can cause the lg_prealloc_list to be updated.
4326 */
4327
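/*
 * Note: lg_prealloc_list[order] is kept roughly sorted by pa_free in
 * ascending order; the new pa is inserted in front of the first entry
 * that has more free space, or appended at the tail if it has the most
 * free space of all.
 */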
4328static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4329{
4330 int order, added = 0, lg_prealloc_count = 1;
4331 struct super_block *sb = ac->ac_sb;
4332 struct ext4_locality_group *lg = ac->ac_lg;
4333 struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4334
4335 order = fls(pa->pa_free) - 1;
4336 if (order > PREALLOC_TB_SIZE - 1)
4337 /* The max size of hash table is PREALLOC_TB_SIZE */
4338 order = PREALLOC_TB_SIZE - 1;
4339 /* Add the prealloc space to lg */
Niu Yaweif1167002013-02-01 21:31:27 -05004340 spin_lock(&lg->lg_prealloc_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004341 list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4342 pa_inode_list) {
4343 spin_lock(&tmp_pa->pa_lock);
4344 if (tmp_pa->pa_deleted) {
Theodore Ts'oe7c9e3e2009-03-27 19:43:21 -04004345 spin_unlock(&tmp_pa->pa_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004346 continue;
4347 }
4348 if (!added && pa->pa_free < tmp_pa->pa_free) {
4349 /* Add to the tail of the previous entry */
4350 list_add_tail_rcu(&pa->pa_inode_list,
4351 &tmp_pa->pa_inode_list);
4352 added = 1;
4353 /*
4354 * we want to count the total
4355 * number of entries in the list
4356 */
4357 }
4358 spin_unlock(&tmp_pa->pa_lock);
4359 lg_prealloc_count++;
4360 }
4361 if (!added)
4362 list_add_tail_rcu(&pa->pa_inode_list,
4363 &lg->lg_prealloc_list[order]);
Niu Yaweif1167002013-02-01 21:31:27 -05004364 spin_unlock(&lg->lg_prealloc_lock);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004365
4366 /* Now trim the list to be not more than 8 elements */
4367 if (lg_prealloc_count > 8) {
4368 ext4_mb_discard_lg_preallocations(sb, lg,
Niu Yaweif1167002013-02-01 21:31:27 -05004369 order, lg_prealloc_count);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004370 return;
4371 }
4372 return ;
4373}
4374
Alex Tomasc9de5602008-01-29 00:19:52 -05004375/*
 4376 * release all resources we used in the allocation
4377 */
4378static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4379{
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004380 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004381 struct ext4_prealloc_space *pa = ac->ac_pa;
4382 if (pa) {
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004383 if (pa->pa_type == MB_GROUP_PA) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004384 /* see comment in ext4_mb_use_group_pa() */
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004385 spin_lock(&pa->pa_lock);
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004386 pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
4387 pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
Aneesh Kumar K.V6be2ded2008-07-23 14:14:05 -04004388 pa->pa_free -= ac->ac_b_ex.fe_len;
4389 pa->pa_len -= ac->ac_b_ex.fe_len;
4390 spin_unlock(&pa->pa_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05004391 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004392 }
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004393 if (pa) {
4394 /*
4395 * We want to add the pa to the right bucket.
4396 * Remove it from the list and while adding
4397 * make sure the list to which we are adding
Amir Goldstein44183d42011-05-09 21:52:36 -04004398 * doesn't grow big.
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004399 */
Aneesh Kumar K.Vcc0fb9a2009-03-27 17:16:58 -04004400 if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
Aneesh Kumar K.Vba443912009-02-10 11:14:34 -05004401 spin_lock(pa->pa_obj_lock);
4402 list_del_rcu(&pa->pa_inode_list);
4403 spin_unlock(pa->pa_obj_lock);
4404 ext4_mb_add_n_trim(ac);
4405 }
4406 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4407 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004408 if (ac->ac_bitmap_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004409 put_page(ac->ac_bitmap_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05004410 if (ac->ac_buddy_page)
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004411 put_page(ac->ac_buddy_page);
Alex Tomasc9de5602008-01-29 00:19:52 -05004412 if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4413 mutex_unlock(&ac->ac_lg->lg_mutex);
4414 ext4_mb_collect_stats(ac);
4415 return 0;
4416}
4417
4418static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4419{
Theodore Ts'o8df96752009-05-01 08:50:38 -04004420 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004421 int ret;
4422 int freed = 0;
4423
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004424 trace_ext4_mb_discard_preallocations(sb, needed);
Theodore Ts'o8df96752009-05-01 08:50:38 -04004425 for (i = 0; i < ngroups && needed > 0; i++) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004426 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4427 freed += ret;
4428 needed -= ret;
4429 }
4430
4431 return freed;
4432}
4433
4434/*
 4435 * Main entry point into mballoc to allocate blocks.
 4436 * It tries to use preallocation first, then falls back
 4437 * to the usual allocation path.
4438 */
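/*
 * Minimal usage sketch (illustrative only; the handle, inode, goal and
 * logical block values below are placeholders, and all error handling
 * beyond *errp is omitted):
 *
 *	struct ext4_allocation_request ar = {
 *		.inode   = inode,
 *		.logical = lblk,		// logical block in the file
 *		.goal    = goal_pblk,		// preferred physical block
 *		.len     = 8,			// number of blocks wanted
 *		.flags   = EXT4_MB_HINT_DATA,
 *	};
 *	int err;
 *	ext4_fsblk_t pblk = ext4_mb_new_blocks(handle, &ar, &err);
 *	// on success, ar.len holds the number of blocks actually allocated
 */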
4439ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
Aditya Kali6c7a1202010-08-05 16:22:24 -04004440 struct ext4_allocation_request *ar, int *errp)
Alex Tomasc9de5602008-01-29 00:19:52 -05004441{
Aneesh Kumar K.V6bc6e632008-10-10 09:39:00 -04004442 int freed;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004443 struct ext4_allocation_context *ac = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004444 struct ext4_sb_info *sbi;
4445 struct super_block *sb;
4446 ext4_fsblk_t block = 0;
Mingming Cao60e58e02009-01-22 18:13:05 +01004447 unsigned int inquota = 0;
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004448 unsigned int reserv_clstrs = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004449
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04004450 might_sleep();
Alex Tomasc9de5602008-01-29 00:19:52 -05004451 sb = ar->inode->i_sb;
4452 sbi = EXT4_SB(sb);
4453
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004454 trace_ext4_request_blocks(ar);
Theodore Ts'oba80b102009-01-03 20:03:21 -05004455
Dmitry Monakhov45dc63e2011-10-20 20:07:23 -04004456 /* Allow to use superuser reservation for quota file */
4457 if (IS_NOQUOTA(ar->inode))
4458 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4459
Theodore Ts'oe3cf5d52014-09-04 18:07:25 -04004460 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
Mingming Cao60e58e02009-01-22 18:13:05 +01004461 /* Without delayed allocation we need to verify
 4462 * there are enough free blocks to do block allocation
 4463 * and that the allocation doesn't exceed the quota limits.
Mingming Caod2a17632008-07-14 17:52:37 -04004464 */
Allison Henderson55f020d2011-05-25 07:41:26 -04004465 while (ar->len &&
Theodore Ts'oe7d5f312011-09-09 19:14:51 -04004466 ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
Allison Henderson55f020d2011-05-25 07:41:26 -04004467
Aneesh Kumar K.V030ba6b2008-09-08 23:14:50 -04004468 /* let others free the space */
Lukas Czernerbb8b20e2013-03-10 22:28:09 -04004469 cond_resched();
Aneesh Kumar K.V030ba6b2008-09-08 23:14:50 -04004470 ar->len = ar->len >> 1;
4471 }
4472 if (!ar->len) {
Aneesh Kumar K.Va30d542a2008-10-09 10:56:23 -04004473 *errp = -ENOSPC;
4474 return 0;
4475 }
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004476 reserv_clstrs = ar->len;
Allison Henderson55f020d2011-05-25 07:41:26 -04004477 if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004478 dquot_alloc_block_nofail(ar->inode,
4479 EXT4_C2B(sbi, ar->len));
Allison Henderson55f020d2011-05-25 07:41:26 -04004480 } else {
4481 while (ar->len &&
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004482 dquot_alloc_block(ar->inode,
4483 EXT4_C2B(sbi, ar->len))) {
Allison Henderson55f020d2011-05-25 07:41:26 -04004484
4485 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4486 ar->len--;
4487 }
Mingming Cao60e58e02009-01-22 18:13:05 +01004488 }
4489 inquota = ar->len;
4490 if (ar->len == 0) {
4491 *errp = -EDQUOT;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004492 goto out;
Mingming Cao60e58e02009-01-22 18:13:05 +01004493 }
Mingming Caod2a17632008-07-14 17:52:37 -04004494 }
Mingming Caod2a17632008-07-14 17:52:37 -04004495
Wei Yongjun85556c92012-09-26 20:43:37 -04004496 ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS);
Theodore Ts'o833576b2009-07-13 09:45:52 -04004497 if (!ac) {
Shen Feng363d4252008-07-11 19:27:31 -04004498 ar->len = 0;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004499 *errp = -ENOMEM;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004500 goto out;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004501 }
4502
Eric Sandeen256bdb42008-02-10 01:13:33 -05004503 *errp = ext4_mb_initialize_context(ac, ar);
Alex Tomasc9de5602008-01-29 00:19:52 -05004504 if (*errp) {
4505 ar->len = 0;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004506 goto out;
Alex Tomasc9de5602008-01-29 00:19:52 -05004507 }
4508
Eric Sandeen256bdb42008-02-10 01:13:33 -05004509 ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4510 if (!ext4_mb_use_preallocated(ac)) {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004511 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4512 ext4_mb_normalize_request(ac, ar);
Alex Tomasc9de5602008-01-29 00:19:52 -05004513repeat:
4514 /* allocate space in core */
Aditya Kali6c7a1202010-08-05 16:22:24 -04004515 *errp = ext4_mb_regular_allocator(ac);
Alexey Khoroshilov2c00ef32013-07-01 08:12:36 -04004516 if (*errp)
4517 goto discard_and_exit;
4518
 4519 /* as we've just preallocated more space than
 4520 * the user originally requested, we store the allocated
 4521 * space in a special descriptor */
4522 if (ac->ac_status == AC_STATUS_FOUND &&
4523 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4524 *errp = ext4_mb_new_preallocation(ac);
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004525 if (*errp) {
Alexey Khoroshilov2c00ef32013-07-01 08:12:36 -04004526 discard_and_exit:
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004527 ext4_discard_allocated_blocks(ac);
Aditya Kali6c7a1202010-08-05 16:22:24 -04004528 goto errout;
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004529 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004530 }
Eric Sandeen256bdb42008-02-10 01:13:33 -05004531 if (likely(ac->ac_status == AC_STATUS_FOUND)) {
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004532 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
Vegard Nossum554a5cc2016-07-14 23:02:47 -04004533 if (*errp) {
Curt Wohlgemuthb8441672009-12-08 22:18:25 -05004534 ext4_discard_allocated_blocks(ac);
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004535 goto errout;
4536 } else {
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04004537 block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4538 ar->len = ac->ac_b_ex.fe_len;
4539 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004540 } else {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004541 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
Alex Tomasc9de5602008-01-29 00:19:52 -05004542 if (freed)
4543 goto repeat;
4544 *errp = -ENOSPC;
Aditya Kali6c7a1202010-08-05 16:22:24 -04004545 }
4546
Eric Sandeen6d138ce2012-11-08 11:11:59 -05004547errout:
Aditya Kali6c7a1202010-08-05 16:22:24 -04004548 if (*errp) {
Eric Sandeen256bdb42008-02-10 01:13:33 -05004549 ac->ac_b_ex.fe_len = 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004550 ar->len = 0;
Eric Sandeen256bdb42008-02-10 01:13:33 -05004551 ext4_mb_show_ac(ac);
Alex Tomasc9de5602008-01-29 00:19:52 -05004552 }
Eric Sandeen256bdb42008-02-10 01:13:33 -05004553 ext4_mb_release_context(ac);
Aditya Kali6c7a1202010-08-05 16:22:24 -04004554out:
4555 if (ac)
4556 kmem_cache_free(ext4_ac_cachep, ac);
Mingming Cao60e58e02009-01-22 18:13:05 +01004557 if (inquota && ar->len < inquota)
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004558 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004559 if (!ar->len) {
Theodore Ts'oe3cf5d52014-09-04 18:07:25 -04004560 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004561 /* release all the reserved blocks if non delalloc */
Theodore Ts'o57042652011-09-09 18:56:51 -04004562 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
Theodore Ts'o53accfa2011-09-09 18:48:51 -04004563 reserv_clstrs);
Aneesh Kumar K.V0087d9f2009-01-05 21:49:12 -05004564 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004565
Theodore Ts'o9bffad12009-06-17 11:48:11 -04004566 trace_ext4_allocate_blocks(ar, (unsigned long long)block);
Theodore Ts'oba80b102009-01-03 20:03:21 -05004567
Alex Tomasc9de5602008-01-29 00:19:52 -05004568 return block;
4569}
Alex Tomasc9de5602008-01-29 00:19:52 -05004570
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004571/*
4572 * We can merge two free data extents only if the physical blocks
4573 * are contiguous, AND the extents were freed by the same transaction,
4574 * AND the blocks are associated with the same group.
4575 */
4576static int can_merge(struct ext4_free_data *entry1,
4577 struct ext4_free_data *entry2)
4578{
Bobi Jam18aadd42012-02-20 17:53:02 -05004579 if ((entry1->efd_tid == entry2->efd_tid) &&
4580 (entry1->efd_group == entry2->efd_group) &&
4581 ((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004582 return 1;
4583 return 0;
4584}
4585
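/*
 * Queue a just-freed extent on the group's bb_free_root rb-tree (keyed by
 * the starting cluster) and on the committing transaction's private list,
 * merging it with physically adjacent extents freed in the same
 * transaction. The blocks become allocatable again only after the
 * transaction commits and ext4_free_data_callback() has run.
 */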
Eric Sandeen4ddfef72008-04-29 08:11:12 -04004586static noinline_for_stack int
4587ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004588 struct ext4_free_data *new_entry)
Alex Tomasc9de5602008-01-29 00:19:52 -05004589{
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004590 ext4_group_t group = e4b->bd_group;
Theodore Ts'o84130192011-09-09 18:50:51 -04004591 ext4_grpblk_t cluster;
Theodore Ts'od08854f2016-06-26 18:24:01 -04004592 ext4_grpblk_t clusters = new_entry->efd_count;
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004593 struct ext4_free_data *entry;
Alex Tomasc9de5602008-01-29 00:19:52 -05004594 struct ext4_group_info *db = e4b->bd_info;
4595 struct super_block *sb = e4b->bd_sb;
4596 struct ext4_sb_info *sbi = EXT4_SB(sb);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004597 struct rb_node **n = &db->bb_free_root.rb_node, *node;
4598 struct rb_node *parent = NULL, *new_node;
4599
Frank Mayhar03901312009-01-07 00:06:22 -05004600 BUG_ON(!ext4_handle_valid(handle));
Alex Tomasc9de5602008-01-29 00:19:52 -05004601 BUG_ON(e4b->bd_bitmap_page == NULL);
4602 BUG_ON(e4b->bd_buddy_page == NULL);
4603
Bobi Jam18aadd42012-02-20 17:53:02 -05004604 new_node = &new_entry->efd_node;
4605 cluster = new_entry->efd_start_cluster;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004606
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004607 if (!*n) {
 4608 /* first free block extent. We need to
 4609 * protect the buddy cache from being freed,
4610 * otherwise we'll refresh it from
4611 * on-disk bitmap and lose not-yet-available
4612 * blocks */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004613 get_page(e4b->bd_buddy_page);
4614 get_page(e4b->bd_bitmap_page);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004615 }
4616 while (*n) {
4617 parent = *n;
Bobi Jam18aadd42012-02-20 17:53:02 -05004618 entry = rb_entry(parent, struct ext4_free_data, efd_node);
4619 if (cluster < entry->efd_start_cluster)
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004620 n = &(*n)->rb_left;
Bobi Jam18aadd42012-02-20 17:53:02 -05004621 else if (cluster >= (entry->efd_start_cluster + entry->efd_count))
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004622 n = &(*n)->rb_right;
4623 else {
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004624 ext4_grp_locked_error(sb, group, 0,
Theodore Ts'o84130192011-09-09 18:50:51 -04004625 ext4_group_first_block_no(sb, group) +
4626 EXT4_C2B(sbi, cluster),
Theodore Ts'oe29136f2010-06-29 12:54:28 -04004627 "Block already on to-be-freed list");
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004628 return 0;
Alex Tomasc9de5602008-01-29 00:19:52 -05004629 }
4630 }
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004631
4632 rb_link_node(new_node, parent, n);
4633 rb_insert_color(new_node, &db->bb_free_root);
4634
 4635 /* Now try to see if the extent can be merged to the left and right */
4636 node = rb_prev(new_node);
4637 if (node) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004638 entry = rb_entry(node, struct ext4_free_data, efd_node);
Dmitry Monakhov5d3ee202013-04-03 22:08:52 -04004639 if (can_merge(entry, new_entry) &&
4640 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004641 new_entry->efd_start_cluster = entry->efd_start_cluster;
4642 new_entry->efd_count += entry->efd_count;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004643 rb_erase(node, &(db->bb_free_root));
Bobi Jam18aadd42012-02-20 17:53:02 -05004644 kmem_cache_free(ext4_free_data_cachep, entry);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004645 }
4646 }
4647
4648 node = rb_next(new_node);
4649 if (node) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004650 entry = rb_entry(node, struct ext4_free_data, efd_node);
Dmitry Monakhov5d3ee202013-04-03 22:08:52 -04004651 if (can_merge(new_entry, entry) &&
4652 ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
Bobi Jam18aadd42012-02-20 17:53:02 -05004653 new_entry->efd_count += entry->efd_count;
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004654 rb_erase(node, &(db->bb_free_root));
Bobi Jam18aadd42012-02-20 17:53:02 -05004655 kmem_cache_free(ext4_free_data_cachep, entry);
Aneesh Kumar K.Vc8940582008-10-16 10:14:27 -04004656 }
4657 }
Theodore Ts'o3e624fc2008-10-16 20:00:24 -04004658 /* Add the extent to transaction's private list */
Theodore Ts'od08854f2016-06-26 18:24:01 -04004659 new_entry->efd_jce.jce_func = ext4_free_data_callback;
4660 spin_lock(&sbi->s_md_lock);
4661 _ext4_journal_callback_add(handle, &new_entry->efd_jce);
4662 sbi->s_mb_free_pending += clusters;
4663 spin_unlock(&sbi->s_md_lock);
Alex Tomasc9de5602008-01-29 00:19:52 -05004664 return 0;
4665}
4666
Theodore Ts'o44338712009-11-22 07:44:56 -05004667/**
4668 * ext4_free_blocks() -- Free given blocks and update quota
4669 * @handle: handle for this transaction
4670 * @inode: inode
4671 * @block: start physical block to free
 4672 * @count: number of blocks to free
Yongqiang Yang5def1362011-06-05 23:26:40 -04004673 * @flags: flags used by ext4_free_blocks
Alex Tomasc9de5602008-01-29 00:19:52 -05004674 */
Theodore Ts'o44338712009-11-22 07:44:56 -05004675void ext4_free_blocks(handle_t *handle, struct inode *inode,
Theodore Ts'oe6362602009-11-23 07:17:05 -05004676 struct buffer_head *bh, ext4_fsblk_t block,
4677 unsigned long count, int flags)
Alex Tomasc9de5602008-01-29 00:19:52 -05004678{
Aneesh Kumar K.V26346ff2008-02-10 01:10:04 -05004679 struct buffer_head *bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004680 struct super_block *sb = inode->i_sb;
Alex Tomasc9de5602008-01-29 00:19:52 -05004681 struct ext4_group_desc *gdp;
Theodore Ts'o498e5f22008-11-05 00:14:04 -05004682 unsigned int overflow;
Alex Tomasc9de5602008-01-29 00:19:52 -05004683 ext4_grpblk_t bit;
4684 struct buffer_head *gd_bh;
4685 ext4_group_t block_group;
4686 struct ext4_sb_info *sbi;
4687 struct ext4_buddy e4b;
Theodore Ts'o84130192011-09-09 18:50:51 -04004688 unsigned int count_clusters;
Alex Tomasc9de5602008-01-29 00:19:52 -05004689 int err = 0;
4690 int ret;
4691
Theodore Ts'ob10a44c2013-04-03 22:00:52 -04004692 might_sleep();
Theodore Ts'oe6362602009-11-23 07:17:05 -05004693 if (bh) {
4694 if (block)
4695 BUG_ON(block != bh->b_blocknr);
4696 else
4697 block = bh->b_blocknr;
4698 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004699
Alex Tomasc9de5602008-01-29 00:19:52 -05004700 sbi = EXT4_SB(sb);
Theodore Ts'o1f2acb62010-01-22 17:40:42 -05004701 if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
4702 !ext4_data_block_valid(sbi, block, count)) {
Eric Sandeen12062dd2010-02-15 14:19:27 -05004703 ext4_error(sb, "Freeing blocks not in datazone - "
Theodore Ts'o1f2acb62010-01-22 17:40:42 -05004704 "block = %llu, count = %lu", block, count);
Alex Tomasc9de5602008-01-29 00:19:52 -05004705 goto error_return;
4706 }
4707
Theodore Ts'o0610b6e2009-06-15 03:45:05 -04004708 ext4_debug("freeing block %llu\n", block);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004709 trace_ext4_free_blocks(inode, block, count, flags);
4710
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004711 if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4712 BUG_ON(count > 1);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004713
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004714 ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
4715 inode, bh, block);
Theodore Ts'oe6362602009-11-23 07:17:05 -05004716 }
4717
Theodore Ts'o60e66792010-05-17 07:00:00 -04004718 /*
Theodore Ts'o84130192011-09-09 18:50:51 -04004719 * If the extent to be freed does not begin on a cluster
4720 * boundary, we need to deal with partial clusters at the
4721 * beginning and end of the extent. Normally we will free
4722 * blocks at the beginning or the end unless we are explicitly
4723 * requested to avoid doing so.
4724 */
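	/*
	 * Worked example (a sketch, assuming a bigalloc file system with a
	 * cluster ratio of 16 blocks per cluster): freeing blocks 100..139
	 * rounds the start down to block 96 and the end up to block 143 so
	 * that whole clusters are released, unless
	 * EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER / _NOFREE_LAST_CLUSTER
	 * request that the partial clusters be skipped instead.
	 */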
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004725 overflow = EXT4_PBLK_COFF(sbi, block);
Theodore Ts'o84130192011-09-09 18:50:51 -04004726 if (overflow) {
4727 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
4728 overflow = sbi->s_cluster_ratio - overflow;
4729 block += overflow;
4730 if (count > overflow)
4731 count -= overflow;
4732 else
4733 return;
4734 } else {
4735 block -= overflow;
4736 count += overflow;
4737 }
4738 }
Theodore Ts'of5a44db2013-12-20 09:29:35 -05004739 overflow = EXT4_LBLK_COFF(sbi, count);
Theodore Ts'o84130192011-09-09 18:50:51 -04004740 if (overflow) {
4741 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
4742 if (count > overflow)
4743 count -= overflow;
4744 else
4745 return;
4746 } else
4747 count += sbi->s_cluster_ratio - overflow;
4748 }
4749
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004750 if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
4751 int i;
Daeho Jeongf96c4502016-02-21 18:31:41 -05004752 int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004753
4754 for (i = 0; i < count; i++) {
4755 cond_resched();
Daeho Jeongf96c4502016-02-21 18:31:41 -05004756 if (is_metadata)
4757 bh = sb_find_get_block(inode->i_sb, block + i);
4758 ext4_forget(handle, is_metadata, inode, bh, block + i);
Daeho Jeong9c02ac92015-10-17 22:28:21 -04004759 }
4760 }
4761
Alex Tomasc9de5602008-01-29 00:19:52 -05004762do_more:
4763 overflow = 0;
4764 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4765
Darrick J. Wong163a2032013-08-28 17:35:51 -04004766 if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
4767 ext4_get_group_info(sb, block_group))))
4768 return;
4769
Alex Tomasc9de5602008-01-29 00:19:52 -05004770 /*
4771 * Check to see if we are freeing blocks across a group
4772 * boundary.
4773 */
Theodore Ts'o84130192011-09-09 18:50:51 -04004774 if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4775 overflow = EXT4_C2B(sbi, bit) + count -
4776 EXT4_BLOCKS_PER_GROUP(sb);
Alex Tomasc9de5602008-01-29 00:19:52 -05004777 count -= overflow;
4778 }
Lukas Czerner810da242013-03-02 17:18:58 -05004779 count_clusters = EXT4_NUM_B2C(sbi, count);
Theodore Ts'o574ca172008-07-11 19:27:31 -04004780 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04004781 if (IS_ERR(bitmap_bh)) {
4782 err = PTR_ERR(bitmap_bh);
4783 bitmap_bh = NULL;
Alex Tomasc9de5602008-01-29 00:19:52 -05004784 goto error_return;
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004785 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004786 gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004787 if (!gdp) {
4788 err = -EIO;
Alex Tomasc9de5602008-01-29 00:19:52 -05004789 goto error_return;
Aneesh Kumar K.Vce89f462008-07-23 14:09:29 -04004790 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004791
4792 if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4793 in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4794 in_range(block, ext4_inode_table(sb, gdp),
Theodore Ts'o84130192011-09-09 18:50:51 -04004795 EXT4_SB(sb)->s_itb_per_group) ||
Alex Tomasc9de5602008-01-29 00:19:52 -05004796 in_range(block + count - 1, ext4_inode_table(sb, gdp),
Theodore Ts'o84130192011-09-09 18:50:51 -04004797 EXT4_SB(sb)->s_itb_per_group)) {
Alex Tomasc9de5602008-01-29 00:19:52 -05004798
Eric Sandeen12062dd2010-02-15 14:19:27 -05004799 ext4_error(sb, "Freeing blocks in system zone - "
Theodore Ts'o0610b6e2009-06-15 03:45:05 -04004800 "Block = %llu, count = %lu", block, count);
Aneesh Kumar K.V519deca2008-05-15 14:43:20 -04004801 /* err = 0. ext4_std_error should be a no op */
4802 goto error_return;
Alex Tomasc9de5602008-01-29 00:19:52 -05004803 }
4804
4805 BUFFER_TRACE(bitmap_bh, "getting write access");
4806 err = ext4_journal_get_write_access(handle, bitmap_bh);
4807 if (err)
4808 goto error_return;
4809
4810 /*
4811 * We are about to modify some metadata. Call the journal APIs
4812 * to unshare ->b_data if a currently-committing transaction is
4813 * using it
4814 */
4815 BUFFER_TRACE(gd_bh, "get_write_access");
4816 err = ext4_journal_get_write_access(handle, gd_bh);
4817 if (err)
4818 goto error_return;
Alex Tomasc9de5602008-01-29 00:19:52 -05004819#ifdef AGGRESSIVE_CHECK
4820 {
4821 int i;
Theodore Ts'o84130192011-09-09 18:50:51 -04004822 for (i = 0; i < count_clusters; i++)
Alex Tomasc9de5602008-01-29 00:19:52 -05004823 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4824 }
4825#endif
Theodore Ts'o84130192011-09-09 18:50:51 -04004826 trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters);
Alex Tomasc9de5602008-01-29 00:19:52 -05004827
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04004828 /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */
4829 err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b,
4830 GFP_NOFS|__GFP_NOFAIL);
Aneesh Kumar K.V920313a2009-01-05 21:36:19 -05004831 if (err)
4832 goto error_return;
Theodore Ts'oe6362602009-11-23 07:17:05 -05004833
Daeho Jeongf96c4502016-02-21 18:31:41 -05004834 /*
4835 * We need to make sure we don't reuse the freed block until after the
4836 * transaction is committed. We make an exception if the inode is to be
4837 * written in writeback mode since writeback mode has weak data
4838 * consistency guarantees.
4839 */
4840 if (ext4_handle_valid(handle) &&
4841 ((flags & EXT4_FREE_BLOCKS_METADATA) ||
4842 !ext4_should_writeback_data(inode))) {
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004843 struct ext4_free_data *new_entry;
4844 /*
Michal Hocko7444a072015-07-05 12:33:44 -04004845 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
4846 * to fail.
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004847 */
Michal Hocko7444a072015-07-05 12:33:44 -04004848 new_entry = kmem_cache_alloc(ext4_free_data_cachep,
4849 GFP_NOFS|__GFP_NOFAIL);
Bobi Jam18aadd42012-02-20 17:53:02 -05004850 new_entry->efd_start_cluster = bit;
4851 new_entry->efd_group = block_group;
4852 new_entry->efd_count = count_clusters;
4853 new_entry->efd_tid = handle->h_transaction->t_tid;
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004854
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004855 ext4_lock_group(sb, block_group);
Theodore Ts'o84130192011-09-09 18:50:51 -04004856 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004857 ext4_mb_free_metadata(handle, &e4b, new_entry);
Alex Tomasc9de5602008-01-29 00:19:52 -05004858 } else {
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004859 /* need to update group_info->bb_free and bitmap
 4860 * with the group lock held. generate_buddy looks at
 4861 * them with the group lock held
4862 */
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05004863 if (test_opt(sb, DISCARD)) {
4864 err = ext4_issue_discard(sb, block_group, bit, count);
4865 if (err && err != -EOPNOTSUPP)
4866 ext4_msg(sb, KERN_WARNING, "discard request in"
4867 " group:%d block:%d count:%lu failed"
4868 " with %d", block_group, bit, count,
4869 err);
Lukas Czerner8f9ff182013-10-30 11:10:52 -04004870 } else
4871 EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05004872
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004873 ext4_lock_group(sb, block_group);
Theodore Ts'o84130192011-09-09 18:50:51 -04004874 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
4875 mb_free_blocks(inode, &e4b, bit, count_clusters);
Alex Tomasc9de5602008-01-29 00:19:52 -05004876 }
4877
Theodore Ts'o021b65b2011-09-09 19:08:51 -04004878 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4879 ext4_free_group_clusters_set(sb, gdp, ret);
Tao Ma79f1ba42012-10-22 00:34:32 -04004880 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04004881 ext4_group_desc_csum_set(sb, block_group, gdp);
Aneesh Kumar K.V955ce5f2009-05-02 20:35:09 -04004882 ext4_unlock_group(sb, block_group);
Alex Tomasc9de5602008-01-29 00:19:52 -05004883
Jose R. Santos772cb7c2008-07-11 19:27:31 -04004884 if (sbi->s_log_groups_per_flex) {
4885 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04004886 atomic64_add(count_clusters,
4887 &sbi->s_flex_groups[flex_group].free_clusters);
Jose R. Santos772cb7c2008-07-11 19:27:31 -04004888 }
4889
Theodore Ts'o71d4f7d2014-07-15 06:02:38 -04004890 if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
Aditya Kali7b415bf2011-09-09 19:04:51 -04004891 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
Jan Kara7d734532013-08-17 09:36:54 -04004892 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4893
4894 ext4_mb_unload_buddy(&e4b);
Aditya Kali7b415bf2011-09-09 19:04:51 -04004895
Aneesh Kumar K.V7a2fcbf2009-01-05 21:36:55 -05004896 /* We dirtied the bitmap block */
4897 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4898 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
4899
Alex Tomasc9de5602008-01-29 00:19:52 -05004900 /* And the group descriptor block */
4901 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
Frank Mayhar03901312009-01-07 00:06:22 -05004902 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
Alex Tomasc9de5602008-01-29 00:19:52 -05004903 if (!err)
4904 err = ret;
4905
4906 if (overflow && !err) {
4907 block += count;
4908 count = overflow;
4909 put_bh(bitmap_bh);
4910 goto do_more;
4911 }
Alex Tomasc9de5602008-01-29 00:19:52 -05004912error_return:
4913 brelse(bitmap_bh);
4914 ext4_std_error(sb, err);
Alex Tomasc9de5602008-01-29 00:19:52 -05004915 return;
4916}
Lukas Czerner7360d172010-10-27 21:30:12 -04004917
4918/**
Yongqiang Yang05291552011-07-26 21:43:56 -04004919 * ext4_group_add_blocks() -- Add given blocks to an existing group
Amir Goldstein2846e822011-05-09 10:46:41 -04004920 * @handle: handle to this transaction
4921 * @sb: super block
Anatol Pomozov4907cb72012-09-01 10:31:09 -07004922 * @block: start physical block to add to the block group
Amir Goldstein2846e822011-05-09 10:46:41 -04004923 * @count: number of blocks to add
4924 *
Amir Goldsteine73a3472011-05-09 21:40:01 -04004925 * This marks the blocks as free in the bitmap and buddy.
Amir Goldstein2846e822011-05-09 10:46:41 -04004926 */
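/*
 * Note: this is typically used by the online resize path to hand blocks
 * of a newly grown block group over to mballoc; it is not part of the
 * normal allocation/free fast path.
 */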
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004927int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
Amir Goldstein2846e822011-05-09 10:46:41 -04004928 ext4_fsblk_t block, unsigned long count)
4929{
4930 struct buffer_head *bitmap_bh = NULL;
4931 struct buffer_head *gd_bh;
4932 ext4_group_t block_group;
4933 ext4_grpblk_t bit;
4934 unsigned int i;
4935 struct ext4_group_desc *desc;
4936 struct ext4_sb_info *sbi = EXT4_SB(sb);
Amir Goldsteine73a3472011-05-09 21:40:01 -04004937 struct ext4_buddy e4b;
Amir Goldstein2846e822011-05-09 10:46:41 -04004938 int err = 0, ret, blk_free_count;
4939 ext4_grpblk_t blocks_freed;
Amir Goldstein2846e822011-05-09 10:46:41 -04004940
4941 ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
4942
Yongqiang Yang4740b832011-07-26 21:51:08 -04004943 if (count == 0)
4944 return 0;
4945
Amir Goldstein2846e822011-05-09 10:46:41 -04004946 ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
Amir Goldstein2846e822011-05-09 10:46:41 -04004947 /*
4948 * Check to see if we are freeing blocks across a group
4949 * boundary.
4950 */
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004951 if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
Jakub Wilk8d2ae1c2016-04-27 01:11:21 -04004952 ext4_warning(sb, "too many blocks added to group %u",
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004953 block_group);
4954 err = -EINVAL;
Amir Goldstein2846e822011-05-09 10:46:41 -04004955 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004956 }
Theodore Ts'o2cd05cc2011-05-09 10:58:45 -04004957
Amir Goldstein2846e822011-05-09 10:46:41 -04004958 bitmap_bh = ext4_read_block_bitmap(sb, block_group);
Darrick J. Wong9008a582015-10-17 21:33:24 -04004959 if (IS_ERR(bitmap_bh)) {
4960 err = PTR_ERR(bitmap_bh);
4961 bitmap_bh = NULL;
Amir Goldstein2846e822011-05-09 10:46:41 -04004962 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004963 }
4964
Amir Goldstein2846e822011-05-09 10:46:41 -04004965 desc = ext4_get_group_desc(sb, block_group, &gd_bh);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004966 if (!desc) {
4967 err = -EIO;
Amir Goldstein2846e822011-05-09 10:46:41 -04004968 goto error_return;
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004969 }
Amir Goldstein2846e822011-05-09 10:46:41 -04004970
4971 if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
4972 in_range(ext4_inode_bitmap(sb, desc), block, count) ||
4973 in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
4974 in_range(block + count - 1, ext4_inode_table(sb, desc),
4975 sbi->s_itb_per_group)) {
4976 ext4_error(sb, "Adding blocks in system zones - "
4977 "Block = %llu, count = %lu",
4978 block, count);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04004979 err = -EINVAL;
Amir Goldstein2846e822011-05-09 10:46:41 -04004980 goto error_return;
4981 }
4982
Theodore Ts'o2cd05cc2011-05-09 10:58:45 -04004983 BUFFER_TRACE(bitmap_bh, "getting write access");
4984 err = ext4_journal_get_write_access(handle, bitmap_bh);
Amir Goldstein2846e822011-05-09 10:46:41 -04004985 if (err)
4986 goto error_return;
4987
4988 /*
4989 * We are about to modify some metadata. Call the journal APIs
4990 * to unshare ->b_data if a currently-committing transaction is
4991 * using it
4992 */
4993 BUFFER_TRACE(gd_bh, "get_write_access");
4994 err = ext4_journal_get_write_access(handle, gd_bh);
4995 if (err)
4996 goto error_return;
Amir Goldsteine73a3472011-05-09 21:40:01 -04004997
Amir Goldstein2846e822011-05-09 10:46:41 -04004998 for (i = 0, blocks_freed = 0; i < count; i++) {
4999 BUFFER_TRACE(bitmap_bh, "clear bit");
Amir Goldsteine73a3472011-05-09 21:40:01 -04005000 if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
Amir Goldstein2846e822011-05-09 10:46:41 -04005001 ext4_error(sb, "bit already cleared for block %llu",
5002 (ext4_fsblk_t)(block + i));
5003 BUFFER_TRACE(bitmap_bh, "bit already cleared");
5004 } else {
5005 blocks_freed++;
5006 }
5007 }
Amir Goldsteine73a3472011-05-09 21:40:01 -04005008
5009 err = ext4_mb_load_buddy(sb, block_group, &e4b);
5010 if (err)
5011 goto error_return;
5012
5013 /*
5014 * need to update group_info->bb_free and bitmap
 5015 * with the group lock held. generate_buddy looks at
 5016 * them with the group lock held
5017 */
Amir Goldstein2846e822011-05-09 10:46:41 -04005018 ext4_lock_group(sb, block_group);
Amir Goldsteine73a3472011-05-09 21:40:01 -04005019 mb_clear_bits(bitmap_bh->b_data, bit, count);
5020 mb_free_blocks(NULL, &e4b, bit, count);
Theodore Ts'o021b65b2011-09-09 19:08:51 -04005021 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
5022 ext4_free_group_clusters_set(sb, desc, blk_free_count);
Tao Ma79f1ba42012-10-22 00:34:32 -04005023 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
Darrick J. Wongfeb0ab32012-04-29 18:45:10 -04005024 ext4_group_desc_csum_set(sb, block_group, desc);
Amir Goldstein2846e822011-05-09 10:46:41 -04005025 ext4_unlock_group(sb, block_group);
Theodore Ts'o57042652011-09-09 18:56:51 -04005026 percpu_counter_add(&sbi->s_freeclusters_counter,
Lukas Czerner810da242013-03-02 17:18:58 -05005027 EXT4_NUM_B2C(sbi, blocks_freed));
Amir Goldstein2846e822011-05-09 10:46:41 -04005028
5029 if (sbi->s_log_groups_per_flex) {
5030 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
Theodore Ts'o90ba9832013-03-11 23:39:59 -04005031 atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
5032 &sbi->s_flex_groups[flex_group].free_clusters);
Amir Goldstein2846e822011-05-09 10:46:41 -04005033 }
Amir Goldsteine73a3472011-05-09 21:40:01 -04005034
5035 ext4_mb_unload_buddy(&e4b);
Amir Goldstein2846e822011-05-09 10:46:41 -04005036
5037 /* We dirtied the bitmap block */
5038 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
5039 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
5040
5041 /* And the group descriptor block */
5042 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
5043 ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
5044 if (!err)
5045 err = ret;
5046
5047error_return:
5048 brelse(bitmap_bh);
5049 ext4_std_error(sb, err);
Yongqiang Yangcc7365d2011-07-26 21:46:07 -04005050 return err;
Amir Goldstein2846e822011-05-09 10:46:41 -04005051}
5052
5053/**
Lukas Czerner7360d172010-10-27 21:30:12 -04005054 * ext4_trim_extent -- function to TRIM one single free extent in the group
5055 * @sb: super block for the file system
5056 * @start: starting block of the free extent in the alloc. group
5057 * @count: number of blocks to TRIM
5058 * @group: alloc. group we are working with
5059 * @e4b: ext4 buddy for the group
5060 *
 5061 * Trim "count" blocks starting at "start" in the "group". To ensure that no
 5062 * one will allocate those blocks, mark them as used in the buddy bitmap. This
 5063 * must be called under the group lock.
5064 */
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005065static int ext4_trim_extent(struct super_block *sb, int start, int count,
Theodore Ts'od9f34502011-04-30 13:47:24 -04005066 ext4_group_t group, struct ext4_buddy *e4b)
jon ernste2cbd582014-04-12 23:01:28 -04005067__releases(bitlock)
5068__acquires(bitlock)
Lukas Czerner7360d172010-10-27 21:30:12 -04005069{
5070 struct ext4_free_extent ex;
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005071 int ret = 0;
Lukas Czerner7360d172010-10-27 21:30:12 -04005072
Tao Mab3d4c2b2011-07-11 00:01:52 -04005073 trace_ext4_trim_extent(sb, group, start, count);
5074
Lukas Czerner7360d172010-10-27 21:30:12 -04005075 assert_spin_locked(ext4_group_lock_ptr(sb, group));
5076
5077 ex.fe_start = start;
5078 ex.fe_group = group;
5079 ex.fe_len = count;
5080
5081 /*
5082 * Mark blocks used, so no one can reuse them while
5083 * being trimmed.
5084 */
5085 mb_mark_used(e4b, &ex);
5086 ext4_unlock_group(sb, group);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005087 ret = ext4_issue_discard(sb, group, start, count);
Lukas Czerner7360d172010-10-27 21:30:12 -04005088 ext4_lock_group(sb, group);
5089 mb_free_blocks(NULL, e4b, start, ex.fe_len);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005090 return ret;
Lukas Czerner7360d172010-10-27 21:30:12 -04005091}
5092
5093/**
5094 * ext4_trim_all_free -- function to trim all free space in alloc. group
5095 * @sb: super block for file system
Tao Ma22612282011-07-11 00:04:34 -04005096 * @group: group to be trimmed
Lukas Czerner7360d172010-10-27 21:30:12 -04005097 * @start: first group block to examine
5098 * @max: last group block to examine
5099 * @minblocks: minimum extent block count
5100 *
5101 * ext4_trim_all_free walks through the group's buddy bitmap searching for
5102 * free extents. When a free extent of at least "minblocks" blocks is found,
5103 * ext4_trim_extent is called to TRIM it: the extent is marked as used in the
5104 * group buddy bitmap so nobody can allocate it while it is being trimmed, a
5105 * TRIM command is issued on the extent, and the extent is then freed again
5106 * in the group buddy bitmap. This is repeated until the whole group has
5107 * been scanned.
5110 */
Lukas Czerner0b75a842011-02-23 12:22:49 -05005111static ext4_grpblk_t
Lukas Czerner78944082011-05-24 18:16:27 -04005112ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
5113 ext4_grpblk_t start, ext4_grpblk_t max,
5114 ext4_grpblk_t minblocks)
Lukas Czerner7360d172010-10-27 21:30:12 -04005115{
5116 void *bitmap;
Tao Ma169ddc32011-07-11 00:00:07 -04005117 ext4_grpblk_t next, count = 0, free_count = 0;
Lukas Czerner78944082011-05-24 18:16:27 -04005118 struct ext4_buddy e4b;
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005119 int ret = 0;
Lukas Czerner7360d172010-10-27 21:30:12 -04005120
Tao Mab3d4c2b2011-07-11 00:01:52 -04005121 trace_ext4_trim_all_free(sb, group, start, max);
5122
Lukas Czerner78944082011-05-24 18:16:27 -04005123 ret = ext4_mb_load_buddy(sb, group, &e4b);
5124 if (ret) {
Konstantin Khlebnikova90e0452017-05-21 22:35:23 -04005125 ext4_warning(sb, "Error %d loading buddy information for %u",
5126 ret, group);
Lukas Czerner78944082011-05-24 18:16:27 -04005127 return ret;
5128 }
Lukas Czerner78944082011-05-24 18:16:27 -04005129 bitmap = e4b.bd_bitmap;
Lukas Czerner28739ee2011-05-24 18:28:07 -04005130
5131 ext4_lock_group(sb, group);
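	/*
	 * Skip the group if it is already marked as trimmed and the previous
	 * trim used a minimum extent length no larger than this request.
	 */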
Tao Ma3d56b8d2011-07-11 00:03:38 -04005132 if (EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) &&
5133 minblocks >= atomic_read(&EXT4_SB(sb)->s_last_trim_minblks))
5134 goto out;
5135
Lukas Czerner78944082011-05-24 18:16:27 -04005136 start = (e4b.bd_info->bb_first_free > start) ?
5137 e4b.bd_info->bb_first_free : start;
Lukas Czerner7360d172010-10-27 21:30:12 -04005138
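	/*
	 * Walk the in-core bitmap: each run of zero bits between "start" and
	 * "max" is a free extent; extents at least "minblocks" long are
	 * handed to ext4_trim_extent() below.
	 */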
Lukas Czerner913eed832012-03-21 21:22:22 -04005139 while (start <= max) {
5140 start = mb_find_next_zero_bit(bitmap, max + 1, start);
5141 if (start > max)
Lukas Czerner7360d172010-10-27 21:30:12 -04005142 break;
Lukas Czerner913eed832012-03-21 21:22:22 -04005143 next = mb_find_next_bit(bitmap, max + 1, start);
Lukas Czerner7360d172010-10-27 21:30:12 -04005144
5145 if ((next - start) >= minblocks) {
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005146 ret = ext4_trim_extent(sb, start,
5147 next - start, group, &e4b);
5148 if (ret && ret != -EOPNOTSUPP)
5149 break;
5150 ret = 0;
Lukas Czerner7360d172010-10-27 21:30:12 -04005151 count += next - start;
5152 }
Tao Ma169ddc32011-07-11 00:00:07 -04005153 free_count += next - start;
Lukas Czerner7360d172010-10-27 21:30:12 -04005154 start = next + 1;
5155
5156 if (fatal_signal_pending(current)) {
5157 count = -ERESTARTSYS;
5158 break;
5159 }
5160
5161 if (need_resched()) {
5162 ext4_unlock_group(sb, group);
5163 cond_resched();
5164 ext4_lock_group(sb, group);
5165 }
5166
Tao Ma169ddc32011-07-11 00:00:07 -04005167 if ((e4b.bd_info->bb_free - free_count) < minblocks)
Lukas Czerner7360d172010-10-27 21:30:12 -04005168 break;
5169 }
Tao Ma3d56b8d2011-07-11 00:03:38 -04005170
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005171 if (!ret) {
5172 ret = count;
Tao Ma3d56b8d2011-07-11 00:03:38 -04005173 EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info);
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005174 }
Tao Ma3d56b8d2011-07-11 00:03:38 -04005175out:
Lukas Czerner7360d172010-10-27 21:30:12 -04005176 ext4_unlock_group(sb, group);
Lukas Czerner78944082011-05-24 18:16:27 -04005177 ext4_mb_unload_buddy(&e4b);
Lukas Czerner7360d172010-10-27 21:30:12 -04005178
5179 ext4_debug("trimmed %d blocks in the group %d\n",
5180 count, group);
5181
Lukas Czernerd71c1ae2012-11-08 14:04:52 -05005182 return ret;
Lukas Czerner7360d172010-10-27 21:30:12 -04005183}
5184
5185/**
5186 * ext4_trim_fs() -- trim ioctl handle function
5187 * @sb: superblock for filesystem
5188 * @range: fstrim_range structure
5189 *
5190 * start: First Byte to trim
5191 * len: number of Bytes to trim from start
5192 * minlen: minimum extent length in Bytes
5193 * ext4_trim_fs goes through all allocation groups containing Bytes from
5194 * start to start+len. For each such group the ext4_trim_all_free function
5195 * is invoked to trim all free space.
5196 */
5197int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
5198{
Lukas Czerner78944082011-05-24 18:16:27 -04005199 struct ext4_group_info *grp;
Lukas Czerner913eed832012-03-21 21:22:22 -04005200 ext4_group_t group, first_group, last_group;
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04005201 ext4_grpblk_t cnt = 0, first_cluster, last_cluster;
Lukas Czerner913eed832012-03-21 21:22:22 -04005202 uint64_t start, end, minlen, trimmed = 0;
Jan Kara0f0a25b2011-01-11 15:16:31 -05005203 ext4_fsblk_t first_data_blk =
5204 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
Lukas Czerner913eed832012-03-21 21:22:22 -04005205 ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es);
Lukas Czerner7360d172010-10-27 21:30:12 -04005206 int ret = 0;
5207
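	/*
	 * The fstrim_range fields are in bytes: convert start and len to
	 * filesystem blocks, and minlen to clusters (EXT4_NUM_B2C rounds up).
	 */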
5208 start = range->start >> sb->s_blocksize_bits;
Lukas Czerner913eed832012-03-21 21:22:22 -04005209 end = start + (range->len >> sb->s_blocksize_bits) - 1;
Lukas Czerneraaf7d732012-09-26 22:21:21 -04005210 minlen = EXT4_NUM_B2C(EXT4_SB(sb),
5211 range->minlen >> sb->s_blocksize_bits);
Lukas Czerner7360d172010-10-27 21:30:12 -04005212
Lukas Czerner5de35e82012-10-22 18:01:19 -04005213 if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
5214 start >= max_blks ||
5215 range->len < sb->s_blocksize)
Lukas Czerner7360d172010-10-27 21:30:12 -04005216 return -EINVAL;
Lukas Czerner913eed832012-03-21 21:22:22 -04005217 if (end >= max_blks)
5218 end = max_blks - 1;
5219 if (end <= first_data_blk)
Tao Ma22f10452011-07-10 23:52:37 -04005220 goto out;
Lukas Czerner913eed832012-03-21 21:22:22 -04005221 if (start < first_data_blk)
Jan Kara0f0a25b2011-01-11 15:16:31 -05005222 start = first_data_blk;
Lukas Czerner7360d172010-10-27 21:30:12 -04005223
Lukas Czerner913eed832012-03-21 21:22:22 -04005224 /* Determine first and last group to examine based on start and end */
Lukas Czerner7360d172010-10-27 21:30:12 -04005225 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04005226 &first_group, &first_cluster);
Lukas Czerner913eed832012-03-21 21:22:22 -04005227 ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end,
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04005228 &last_group, &last_cluster);
Lukas Czerner7360d172010-10-27 21:30:12 -04005229
Lukas Czerner913eed832012-03-21 21:22:22 -04005230 /* end now represents the last cluster to discard in this group */
5231 end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;
Lukas Czerner7360d172010-10-27 21:30:12 -04005232
5233 for (group = first_group; group <= last_group; group++) {
Lukas Czerner78944082011-05-24 18:16:27 -04005234 grp = ext4_get_group_info(sb, group);
5235 /* We only do this if the grp has never been initialized */
5236 if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
Konstantin Khlebnikovadb7ef62016-03-13 17:29:06 -04005237 ret = ext4_mb_init_group(sb, group, GFP_NOFS);
Lukas Czerner78944082011-05-24 18:16:27 -04005238 if (ret)
5239 break;
Lukas Czerner7360d172010-10-27 21:30:12 -04005240 }
5241
Tao Ma0ba08512011-03-23 15:48:11 -04005242 /*
Lukas Czerner913eed832012-03-21 21:22:22 -04005243		 * For all the groups except the last one, the last cluster will
5244		 * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to
5245		 * change it for the last group. Note that last_cluster was
5246		 * already computed earlier by ext4_get_group_no_and_offset().
Tao Ma0ba08512011-03-23 15:48:11 -04005247 */
Lukas Czerner913eed832012-03-21 21:22:22 -04005248 if (group == last_group)
5249 end = last_cluster;
Lukas Czerner7360d172010-10-27 21:30:12 -04005250
Lukas Czerner78944082011-05-24 18:16:27 -04005251 if (grp->bb_free >= minlen) {
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04005252 cnt = ext4_trim_all_free(sb, group, first_cluster,
Lukas Czerner913eed832012-03-21 21:22:22 -04005253 end, minlen);
Lukas Czerner7360d172010-10-27 21:30:12 -04005254 if (cnt < 0) {
5255 ret = cnt;
Lukas Czerner7360d172010-10-27 21:30:12 -04005256 break;
5257 }
Lukas Czerner21e7fd22012-03-21 21:24:22 -04005258 trimmed += cnt;
Lukas Czerner7360d172010-10-27 21:30:12 -04005259 }
Lukas Czerner913eed832012-03-21 21:22:22 -04005260
5261 /*
5262 * For every group except the first one, we are sure
5263 * that the first cluster to discard will be cluster #0.
5264 */
Theodore Ts'o7137d7a2011-09-09 18:38:51 -04005265 first_cluster = 0;
Lukas Czerner7360d172010-10-27 21:30:12 -04005266 }
Lukas Czerner7360d172010-10-27 21:30:12 -04005267
Tao Ma3d56b8d2011-07-11 00:03:38 -04005268 if (!ret)
5269 atomic_set(&EXT4_SB(sb)->s_last_trim_minblks, minlen);
5270
Tao Ma22f10452011-07-10 23:52:37 -04005271out:
Lukas Czerneraaf7d732012-09-26 22:21:21 -04005272 range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
Lukas Czerner7360d172010-10-27 21:30:12 -04005273 return ret;
5274}
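
/*
 * Illustrative sketch (not part of this file): ext4_trim_fs() is normally
 * reached via the FITRIM ioctl on an open file or directory of the mounted
 * filesystem. From userspace (the "/mnt/ext4" mount point is hypothetical):
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX,
 *				      .minlen = 0 };
 *	int fd = open("/mnt/ext4", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, FITRIM, &range) == 0)
 *		printf("%llu bytes trimmed\n",
 *		       (unsigned long long) range.len);
 *
 * A zero minlen lets the filesystem apply its own minimum, and on success
 * range.len is copied back to userspace holding the number of bytes actually
 * trimmed, as computed at the "out:" label above.
 */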