blob: b4f9904c4c6b2ed5f0da30ad664baf33100a879c [file] [log] [blame]
Josef Bacik0f9dd462008-09-23 13:14:11 -04001/*
2 * Copyright (C) 2008 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
Josef Bacik96303082009-07-13 21:29:25 -040019#include <linux/pagemap.h>
Josef Bacik0f9dd462008-09-23 13:14:11 -040020#include <linux/sched.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090021#include <linux/slab.h>
Josef Bacik96303082009-07-13 21:29:25 -040022#include <linux/math64.h>
Josef Bacik6ab60602011-08-08 08:24:46 -040023#include <linux/ratelimit.h>
Josef Bacik0f9dd462008-09-23 13:14:11 -040024#include "ctree.h"
Chris Masonfa9c0d792009-04-03 09:47:43 -040025#include "free-space-cache.h"
26#include "transaction.h"
Josef Bacik0af3d002010-06-21 14:48:16 -040027#include "disk-io.h"
Josef Bacik43be2142011-04-01 14:55:00 +000028#include "extent_io.h"
Li Zefan581bb052011-04-20 10:06:11 +080029#include "inode-map.h"
Chris Masonfa9c0d792009-04-03 09:47:43 -040030
Josef Bacik96303082009-07-13 21:29:25 -040031#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
32#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
33
Li Zefan34d52cb2011-03-29 13:46:06 +080034static int link_free_space(struct btrfs_free_space_ctl *ctl,
Josef Bacik0cb59c92010-07-02 12:14:14 -040035 struct btrfs_free_space *info);
Josef Bacikcd023e72012-05-14 10:06:40 -040036static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
37 struct btrfs_free_space *info);
Josef Bacik0cb59c92010-07-02 12:14:14 -040038
/*
 * Look up the free-space-cache inode keyed by @offset (the block group's
 * objectid, or the free-ino cache offset) and return a referenced inode.
 *
 * Returns the inode on success, or an ERR_PTR:
 *   -ENOENT if the free space header item or the inode itself is missing/bad,
 *   or the error from btrfs_search_slot()/btrfs_iget().
 *
 * @path is released before returning; caller retains ownership of it.
 */
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	/* The header item lives at (FREE_SPACE_OBJECTID, 0, offset). */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		/* No exact match: this block group has no cache inode yet. */
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	/* The header stores the key of the cache inode; read it out. */
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	/*
	 * Clear __GFP_FS on the mapping so page allocations for this inode
	 * cannot recurse back into the filesystem.
	 */
	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	return inode;
}
85
/*
 * Return a referenced free-space-cache inode for @block_group, caching it on
 * the block group the first time it is looked up.
 *
 * Old-style cache inodes (without NODATASUM|NODATACOW) are converted in
 * place and the on-disk cache is invalidated (BTRFS_DC_CLEAR) so it gets
 * rewritten with inline crcs.
 *
 * Returns the inode or an ERR_PTR from __lookup_free_space_inode().
 */
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	/* Fast path: reuse the inode already cached on the block group. */
	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		/* Force the stale-format cache to be regenerated. */
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	/* Stash one reference on the block group for future lookups. */
	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
122
/*
 * Create a free-space-cache inode with objectid @ino and the matching free
 * space header item keyed by @offset, inside transaction @trans.
 *
 * The inode is created NOCOMPRESS|PREALLOC; block-group caches (anything
 * other than the free-ino cache) additionally get NODATASUM|NODATACOW since
 * their crcs are stored inline in the cache pages.
 *
 * Returns 0 on success or a negative errno from the tree insertions.
 */
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	/* Fill in the freshly inserted (zeroed) inode item. */
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* Now insert the header item that points back at this inode. */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
183
Li Zefan0414efa2011-04-20 10:20:14 +0800184int create_free_space_inode(struct btrfs_root *root,
185 struct btrfs_trans_handle *trans,
186 struct btrfs_block_group_cache *block_group,
187 struct btrfs_path *path)
188{
189 int ret;
190 u64 ino;
191
192 ret = btrfs_find_free_objectid(root, &ino);
193 if (ret < 0)
194 return ret;
195
196 return __create_free_space_inode(root, trans, path, ino,
197 block_group->key.objectid);
198}
199
Miao Xie7b61cd92013-05-13 13:55:09 +0000200int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
201 struct btrfs_block_rsv *rsv)
Josef Bacik0af3d002010-06-21 14:48:16 -0400202{
Josef Bacikc8174312011-11-02 09:29:35 -0400203 u64 needed_bytes;
Miao Xie7b61cd92013-05-13 13:55:09 +0000204 int ret;
Josef Bacikc8174312011-11-02 09:29:35 -0400205
206 /* 1 for slack space, 1 for updating the inode */
207 needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
208 btrfs_calc_trans_metadata_size(root, 1);
209
Miao Xie7b61cd92013-05-13 13:55:09 +0000210 spin_lock(&rsv->lock);
211 if (rsv->reserved < needed_bytes)
212 ret = -ENOSPC;
213 else
214 ret = 0;
215 spin_unlock(&rsv->lock);
Wei Yongjun4b286cd2013-05-21 02:39:21 +0000216 return ret;
Miao Xie7b61cd92013-05-13 13:55:09 +0000217}
218
/*
 * Truncate the free space cache @inode to zero so it can be rewritten.
 *
 * Any failure after the pagecache has been dropped aborts @trans, since the
 * on-disk items and the in-memory state can no longer be reconciled.
 * Returns 0 on success or the negative errno of the failing step.
 */
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	int ret = 0;

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	/* Persist the new zero i_size. */
	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}
246
Josef Bacik9d66e232010-08-25 16:54:15 -0400247static int readahead_cache(struct inode *inode)
248{
249 struct file_ra_state *ra;
250 unsigned long last_index;
251
252 ra = kzalloc(sizeof(*ra), GFP_NOFS);
253 if (!ra)
254 return -ENOMEM;
255
256 file_ra_state_init(ra, inode->i_mapping);
257 last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
258
259 page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);
260
261 kfree(ra);
262
263 return 0;
264}
265
/*
 * State for streaming the free space cache to/from its backing pages.
 * One page is mapped at a time; 'cur' walks through the mapped page.
 */
struct io_ctl {
	void *cur, *orig;	/* cursor into, and start of, the mapped page */
	struct page *page;	/* currently mapped page */
	struct page **pages;	/* all pages of the cache inode */
	struct btrfs_root *root;
	unsigned long size;	/* bytes remaining in the current page */
	int index;		/* next page index to map */
	int num_pages;
	unsigned check_crcs:1;	/* inline crcs (block group caches only) */
};
276
/*
 * Initialize @io_ctl for @inode, sizing the page array from i_size.
 * The free-ino cache skips inline crcs; everything else checks them.
 * Returns 0 on success or -ENOMEM.
 */
static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}
292
/* Release the page-pointer array allocated by io_ctl_init(). */
static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}
297
/* Unmap the currently mapped page, if any; safe to call when none is mapped. */
static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}
306
/*
 * Map the next page and reset the cursor to its start; @clear zeroes the
 * page first (used on the write path). Caller must ensure a page remains.
 */
static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}
317
/*
 * Unmap the current page and unlock/release every page that was prepared.
 * NULL slots are skipped so this is safe after a partial prepare.
 */
static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}
332
/*
 * Find-or-create and lock all cache pages; with @uptodate set (the read
 * path) each page is additionally read from disk and verified uptodate.
 * Pages are left locked and dirty-cleared for I/O. On failure everything
 * already acquired is dropped via io_ctl_drop_pages().
 * Returns 0, -ENOMEM, or -EIO.
 */
static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			/* btrfs_readpage unlocks on completion; re-lock. */
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}
366
367static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
368{
Al Viro528c0322012-04-13 11:03:55 -0400369 __le64 *val;
Josef Bacika67509c2011-10-05 15:18:58 -0400370
371 io_ctl_map_page(io_ctl, 1);
372
373 /*
Josef Bacik5b0e95b2011-10-06 08:58:24 -0400374 * Skip the csum areas. If we don't check crcs then we just have a
375 * 64bit chunk at the front of the first page.
Josef Bacika67509c2011-10-05 15:18:58 -0400376 */
Josef Bacik5b0e95b2011-10-06 08:58:24 -0400377 if (io_ctl->check_crcs) {
378 io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
379 io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
380 } else {
381 io_ctl->cur += sizeof(u64);
382 io_ctl->size -= sizeof(u64) * 2;
383 }
Josef Bacika67509c2011-10-05 15:18:58 -0400384
385 val = io_ctl->cur;
386 *val = cpu_to_le64(generation);
387 io_ctl->cur += sizeof(u64);
Josef Bacika67509c2011-10-05 15:18:58 -0400388}
389
/*
 * Verify the generation stored in the (already mapped) first page matches
 * @generation. On mismatch the page is unmapped and -EIO is returned;
 * otherwise the cursor is advanced past the generation and 0 is returned.
 */
static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}
418
/*
 * Finish the page at @index: crc its payload (skipping the crc table on
 * page 0) and store the crc in slot @index of the table on the first page.
 * Always unmaps the current page; a no-op crc-wise when crcs are disabled.
 */
static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	/* Page 0 starts with the crc table itself; don't crc it. */
	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}
442
/*
 * Map page @index and verify its crc against slot @index of the crc table
 * on the first page. Returns 0 on match (page left mapped), -EIO on
 * mismatch (page unmapped). With crcs disabled the page is just mapped.
 */
static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	/* Page 0's payload begins after the crc table. */
	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}
475
/*
 * Append one free-space entry (extent or bitmap marker) at the cursor and
 * advance. When the current page can no longer hold a full entry, crc and
 * unmap it and map the next one. Returns 0, or -ENOSPC when no page is
 * mapped (all pages exhausted).
 */
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	/* Current page is full: seal it with its crc. */
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}
505
/*
 * Write a full-page bitmap. Bitmaps are page-aligned: if the cursor is
 * mid-page the current page is sealed first and a fresh one mapped.
 * Returns 0, or -ENOSPC when no pages remain.
 */
static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}
528
/*
 * Seal the current page and zero-fill (and crc) every remaining page so
 * the whole cache file is initialized on disk.
 */
static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}
545
/*
 * Read the next free-space entry into @entry, reporting its kind via @type.
 * Maps (and crc-checks) the next page lazily when the previous one was
 * exhausted. Returns 0 or a crc-check error.
 */
static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	/* Page exhausted; the next read will map/verify the next page. */
	io_ctl_unmap_page(io_ctl);

	return 0;
}
572
/*
 * Read a full-page bitmap into @entry->bitmap (which must already be
 * allocated). Verifies the page crc first. Returns 0 or -EIO.
 */
static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}
587
/*
 * Since we attach pinned extents after the fact we can have contiguous sections
 * of free space that are split up in entries.  This poses a problem with the
 * tree logging stuff since it could have allocated across what appears to be 2
 * entries since we would have merged the entries when adding the pinned extents
 * back to the free space cache.  So run through the space cache that we just
 * loaded and merge contiguous entries.  This will make the log replay stuff not
 * blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		/* Only merge plain extent entries, never bitmaps. */
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			/*
			 * The tree changed under us; drop the lock and
			 * restart the scan from the beginning.
			 */
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}
625
/*
 * Load the on-disk free space cache keyed by @offset into @ctl.
 *
 * Returns 1 when the cache was fully loaded, 0 when there is no usable
 * cache (missing, empty, or stale generation — callers then rebuild it),
 * and a negative errno only for setup failures (e.g. -ENOMEM from
 * io_ctl_init). On any mid-load corruption the partially built tree is
 * torn down via __btrfs_remove_free_space_cache().
 */
static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	/* A stale generation means the cache wasn't written out cleanly. */
	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) "
			"did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		/* A zero-length entry means the cache is corrupt. */
		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			/* Bitmap payloads follow all entries; read later. */
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
778
/*
 * Load @block_group's free space cache from disk, validating that the
 * loaded free-space total matches the block group's accounting.
 *
 * Returns 1 on a successful, verified load; 0 when the cache is absent,
 * untrusted (disk_cache_state != BTRFS_DC_WRITTEN), or failed to load —
 * in the failure case the cache is also marked BTRFS_DC_CLEAR so it gets
 * rebuilt. Never returns a negative value to the caller.
 */
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	/* Read from the last committed state; no tree locks needed. */
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	/* Cross-check the loaded total against the block group item. */
	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_err(fs_info, "block group %llu has wrong amount of free space",
			block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_err(fs_info, "failed to load free space cache for block group %llu",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
854
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @inode - the free space cache inode to write to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, extent_start, extent_end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;	/* assume failure until everything has succeeded */

	INIT_LIST_HEAD(&bitmap_list);

	/* Nothing allocated for this cache inode yet, nothing to write. */
	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	/*
	 * Start from the free space tree; fall back to the cluster's rbtree
	 * once the main tree is exhausted (and again below inside the loop).
	 */
	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		goto out_nospc;

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		/* Defer the bitmap payload; only its header went out above. */
		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	if (block_group)
		start = block_group->key.objectid;

	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			/* No more pinned extents; not an error. */
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		/* Clamp the pinned extent to this block group's range. */
		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		entries++;
		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
		if (ret)
			goto out_nospc;

		start = extent_end;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	/* Hand the filled pages to the write path, then drop our refs. */
	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;


	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	/* Update the free space header item with the new entry counts. */
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		/* Exact key not found; check the previous slot for it. */
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		/* Invalidate the partially-written cache so it is not read. */
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	/* Entries didn't fit: unhook any queued bitmaps and unwind. */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}
1073
/*
 * Write @block_group's free space cache to disk if it has been set up
 * (disk_cache_state >= BTRFS_DC_SETUP).  A write failure is deliberately
 * not propagated: the block group is flagged BTRFS_DC_ERROR and 0 is
 * returned, so a bad cache never fails the caller.
 */
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	/* Free space cache items always live in the tree root. */
	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}
1113
Li Zefan34d52cb2011-03-29 13:46:06 +08001114static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
Josef Bacik96303082009-07-13 21:29:25 -04001115 u64 offset)
1116{
Josef Bacikb12d6862013-08-26 17:14:08 -04001117 ASSERT(offset >= bitmap_start);
Josef Bacik96303082009-07-13 21:29:25 -04001118 offset -= bitmap_start;
Li Zefan34d52cb2011-03-29 13:46:06 +08001119 return (unsigned long)(div_u64(offset, unit));
Josef Bacik96303082009-07-13 21:29:25 -04001120}
1121
Li Zefan34d52cb2011-03-29 13:46:06 +08001122static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
Josef Bacik96303082009-07-13 21:29:25 -04001123{
Li Zefan34d52cb2011-03-29 13:46:06 +08001124 return (unsigned long)(div_u64(bytes, unit));
Josef Bacik96303082009-07-13 21:29:25 -04001125}
1126
Li Zefan34d52cb2011-03-29 13:46:06 +08001127static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
Josef Bacik96303082009-07-13 21:29:25 -04001128 u64 offset)
1129{
1130 u64 bitmap_start;
1131 u64 bytes_per_bitmap;
1132
Li Zefan34d52cb2011-03-29 13:46:06 +08001133 bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
1134 bitmap_start = offset - ctl->start;
Josef Bacik96303082009-07-13 21:29:25 -04001135 bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
1136 bitmap_start *= bytes_per_bitmap;
Li Zefan34d52cb2011-03-29 13:46:06 +08001137 bitmap_start += ctl->start;
Josef Bacik96303082009-07-13 21:29:25 -04001138
1139 return bitmap_start;
1140}
Josef Bacik0f9dd462008-09-23 13:14:11 -04001141
1142static int tree_insert_offset(struct rb_root *root, u64 offset,
Josef Bacik96303082009-07-13 21:29:25 -04001143 struct rb_node *node, int bitmap)
Josef Bacik0f9dd462008-09-23 13:14:11 -04001144{
1145 struct rb_node **p = &root->rb_node;
1146 struct rb_node *parent = NULL;
1147 struct btrfs_free_space *info;
1148
1149 while (*p) {
1150 parent = *p;
1151 info = rb_entry(parent, struct btrfs_free_space, offset_index);
1152
Josef Bacik96303082009-07-13 21:29:25 -04001153 if (offset < info->offset) {
Josef Bacik0f9dd462008-09-23 13:14:11 -04001154 p = &(*p)->rb_left;
Josef Bacik96303082009-07-13 21:29:25 -04001155 } else if (offset > info->offset) {
Josef Bacik0f9dd462008-09-23 13:14:11 -04001156 p = &(*p)->rb_right;
Josef Bacik96303082009-07-13 21:29:25 -04001157 } else {
1158 /*
1159 * we could have a bitmap entry and an extent entry
1160 * share the same offset. If this is the case, we want
1161 * the extent entry to always be found first if we do a
1162 * linear search through the tree, since we want to have
1163 * the quickest allocation time, and allocating from an
1164 * extent is faster than allocating from a bitmap. So
1165 * if we're inserting a bitmap and we find an entry at
1166 * this offset, we want to go right, or after this entry
1167 * logically. If we are inserting an extent and we've
1168 * found a bitmap, we want to go left, or before
1169 * logically.
1170 */
1171 if (bitmap) {
Josef Bacik207dde82011-05-13 14:49:23 -04001172 if (info->bitmap) {
1173 WARN_ON_ONCE(1);
1174 return -EEXIST;
1175 }
Josef Bacik96303082009-07-13 21:29:25 -04001176 p = &(*p)->rb_right;
1177 } else {
Josef Bacik207dde82011-05-13 14:49:23 -04001178 if (!info->bitmap) {
1179 WARN_ON_ONCE(1);
1180 return -EEXIST;
1181 }
Josef Bacik96303082009-07-13 21:29:25 -04001182 p = &(*p)->rb_left;
1183 }
1184 }
Josef Bacik0f9dd462008-09-23 13:14:11 -04001185 }
1186
1187 rb_link_node(node, parent, p);
1188 rb_insert_color(node, root);
1189
1190 return 0;
1191}
1192
/*
 * searches the tree for the given offset.
 *
 * bitmap_only - If this is set, only return a bitmap entry that starts
 * exactly at 'offset' (or NULL if there is none).
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		/* exact-offset hit */
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	/* no exact hit; 'prev' is the last node visited on the way down */
	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			/* nothing before 'offset'; fuzzy may take the next */
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	/* does the entry before 'offset' actually cover it? */
	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	/* fuzzy: walk forward to the first entry ending after 'offset' */
	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
1315
Li Zefanf333adb2010-11-09 14:57:39 +08001316static inline void
Li Zefan34d52cb2011-03-29 13:46:06 +08001317__unlink_free_space(struct btrfs_free_space_ctl *ctl,
Li Zefanf333adb2010-11-09 14:57:39 +08001318 struct btrfs_free_space *info)
Josef Bacik0f9dd462008-09-23 13:14:11 -04001319{
Li Zefan34d52cb2011-03-29 13:46:06 +08001320 rb_erase(&info->offset_index, &ctl->free_space_offset);
1321 ctl->free_extents--;
Li Zefanf333adb2010-11-09 14:57:39 +08001322}
1323
Li Zefan34d52cb2011-03-29 13:46:06 +08001324static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
Li Zefanf333adb2010-11-09 14:57:39 +08001325 struct btrfs_free_space *info)
1326{
Li Zefan34d52cb2011-03-29 13:46:06 +08001327 __unlink_free_space(ctl, info);
1328 ctl->free_space -= info->bytes;
Josef Bacik0f9dd462008-09-23 13:14:11 -04001329}
1330
Li Zefan34d52cb2011-03-29 13:46:06 +08001331static int link_free_space(struct btrfs_free_space_ctl *ctl,
Josef Bacik0f9dd462008-09-23 13:14:11 -04001332 struct btrfs_free_space *info)
1333{
1334 int ret = 0;
1335
Josef Bacikb12d6862013-08-26 17:14:08 -04001336 ASSERT(info->bytes || info->bitmap);
Li Zefan34d52cb2011-03-29 13:46:06 +08001337 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
Josef Bacik96303082009-07-13 21:29:25 -04001338 &info->offset_index, (info->bitmap != NULL));
Josef Bacik0f9dd462008-09-23 13:14:11 -04001339 if (ret)
1340 return ret;
1341
Li Zefan34d52cb2011-03-29 13:46:06 +08001342 ctl->free_space += info->bytes;
1343 ctl->free_extents++;
Josef Bacik96303082009-07-13 21:29:25 -04001344 return ret;
1345}
1346
/*
 * Recompute ctl->extents_thresh: how many individual extent entries we
 * allow before new free space gets folded into bitmaps instead.  The
 * budget is MAX_CACHE_BYTES_PER_GIG of tracking memory per 1GB of block
 * group space, split between bitmap pages and extent entries.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	/* ceil(size / bytes_per_bg): bitmaps needed to cover the group */
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max(max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		/* bitmaps alone exhaust the budget; no extent entries */
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
1394
Miao Xiebb3ac5a2011-08-05 09:32:35 +00001395static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1396 struct btrfs_free_space *info,
1397 u64 offset, u64 bytes)
Josef Bacik96303082009-07-13 21:29:25 -04001398{
Li Zefanf38b6e72011-03-14 13:40:51 +08001399 unsigned long start, count;
Josef Bacik96303082009-07-13 21:29:25 -04001400
Li Zefan34d52cb2011-03-29 13:46:06 +08001401 start = offset_to_bit(info->offset, ctl->unit, offset);
1402 count = bytes_to_bits(bytes, ctl->unit);
Josef Bacikb12d6862013-08-26 17:14:08 -04001403 ASSERT(start + count <= BITS_PER_BITMAP);
Josef Bacik96303082009-07-13 21:29:25 -04001404
Li Zefanf38b6e72011-03-14 13:40:51 +08001405 bitmap_clear(info->bitmap, start, count);
Josef Bacik96303082009-07-13 21:29:25 -04001406
1407 info->bytes -= bytes;
Miao Xiebb3ac5a2011-08-05 09:32:35 +00001408}
1409
1410static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1411 struct btrfs_free_space *info, u64 offset,
1412 u64 bytes)
1413{
1414 __bitmap_clear_bits(ctl, info, offset, bytes);
Li Zefan34d52cb2011-03-29 13:46:06 +08001415 ctl->free_space -= bytes;
Josef Bacik96303082009-07-13 21:29:25 -04001416}
1417
Li Zefan34d52cb2011-03-29 13:46:06 +08001418static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
Josef Bacik817d52f2009-07-13 21:29:25 -04001419 struct btrfs_free_space *info, u64 offset,
1420 u64 bytes)
Josef Bacik96303082009-07-13 21:29:25 -04001421{
Li Zefanf38b6e72011-03-14 13:40:51 +08001422 unsigned long start, count;
Josef Bacik96303082009-07-13 21:29:25 -04001423
Li Zefan34d52cb2011-03-29 13:46:06 +08001424 start = offset_to_bit(info->offset, ctl->unit, offset);
1425 count = bytes_to_bits(bytes, ctl->unit);
Josef Bacikb12d6862013-08-26 17:14:08 -04001426 ASSERT(start + count <= BITS_PER_BITMAP);
Josef Bacik96303082009-07-13 21:29:25 -04001427
Li Zefanf38b6e72011-03-14 13:40:51 +08001428 bitmap_set(info->bitmap, start, count);
Josef Bacik96303082009-07-13 21:29:25 -04001429
1430 info->bytes += bytes;
Li Zefan34d52cb2011-03-29 13:46:06 +08001431 ctl->free_space += bytes;
Josef Bacik96303082009-07-13 21:29:25 -04001432}
1433
/*
 * Search @bitmap_info for a run of set (free) bits of at least *bytes,
 * starting no earlier than *offset.  On success returns 0 and stores the
 * run's start/length in *offset/*bytes.  If no run is big enough,
 * returns -1 and stores the size of the largest run scanned in *bytes so
 * the caller can record the max extent size.
 */
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long max_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;
	unsigned long extent_bits;

	/* start at *offset, clamped to the bitmap's own start */
	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	/* walk each run of set bits, measuring it up to the next zero */
	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		extent_bits = next_zero - i;
		if (extent_bits >= bits) {
			found_bits = extent_bits;
			break;
		} else if (extent_bits > max_bits) {
			max_bits = extent_bits;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	*bytes = (u64)(max_bits) * ctl->unit;
	return -1;
}
1474
/*
 * Find the first free space entry at or after *offset that can satisfy
 * *bytes bytes at alignment @align.  On success returns the entry and
 * updates *offset/*bytes to the aligned location and available size;
 * returns NULL when nothing fits.  In either case *max_extent_size is
 * raised to the largest free extent seen, so the caller can cache the
 * size of the max extent in bytes.
 */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
		unsigned long align, u64 *max_extent_size)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 tmp;
	u64 align_off;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		goto out;

	/* fuzzy search from the bitmap-aligned region containing *offset */
	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		goto out;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		/* make sure the space returned is big enough
		 * to match our requested alignment
		 */
		if (*bytes >= align) {
			/*
			 * Round the entry's offset up to the next @align
			 * boundary relative to ctl->start.  Note do_div()
			 * mutates tmp in place, leaving the quotient.
			 */
			tmp = entry->offset - ctl->start + align - 1;
			do_div(tmp, align);
			tmp = tmp * align + ctl->start;
			align_off = tmp - entry->offset;
		} else {
			align_off = 0;
			tmp = entry->offset;
		}

		/* alignment padding may push us past the entry's size */
		if (entry->bytes < *bytes + align_off) {
			if (entry->bytes > *max_extent_size)
				*max_extent_size = entry->bytes;
			continue;
		}

		if (entry->bitmap) {
			u64 size = *bytes;

			ret = search_bitmap(ctl, entry, &tmp, &size);
			if (!ret) {
				*offset = tmp;
				*bytes = size;
				return entry;
			} else if (size > *max_extent_size) {
				/* search_bitmap() reported its largest run */
				*max_extent_size = size;
			}
			continue;
		}

		*offset = tmp;
		*bytes = entry->bytes - align_off;
		return entry;
	}
out:
	return NULL;
}
1541
Li Zefan34d52cb2011-03-29 13:46:06 +08001542static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
Josef Bacik96303082009-07-13 21:29:25 -04001543 struct btrfs_free_space *info, u64 offset)
1544{
Li Zefan34d52cb2011-03-29 13:46:06 +08001545 info->offset = offset_to_bitmap(ctl, offset);
Josef Bacikf019f422009-09-11 16:11:20 -04001546 info->bytes = 0;
Alexandre Olivaf2d0f672011-11-28 12:04:43 -02001547 INIT_LIST_HEAD(&info->list);
Li Zefan34d52cb2011-03-29 13:46:06 +08001548 link_free_space(ctl, info);
1549 ctl->total_bitmaps++;
Josef Bacik96303082009-07-13 21:29:25 -04001550
Li Zefan34d52cb2011-03-29 13:46:06 +08001551 ctl->op->recalc_thresholds(ctl);
Josef Bacik96303082009-07-13 21:29:25 -04001552}
1553
Li Zefan34d52cb2011-03-29 13:46:06 +08001554static void free_bitmap(struct btrfs_free_space_ctl *ctl,
Li Zefanedf6e2d2010-11-09 14:50:07 +08001555 struct btrfs_free_space *bitmap_info)
1556{
Li Zefan34d52cb2011-03-29 13:46:06 +08001557 unlink_free_space(ctl, bitmap_info);
Li Zefanedf6e2d2010-11-09 14:50:07 +08001558 kfree(bitmap_info->bitmap);
Josef Bacikdc89e982011-01-28 17:05:48 -05001559 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
Li Zefan34d52cb2011-03-29 13:46:06 +08001560 ctl->total_bitmaps--;
1561 ctl->op->recalc_thresholds(ctl);
Li Zefanedf6e2d2010-11-09 14:50:07 +08001562}
1563
Li Zefan34d52cb2011-03-29 13:46:06 +08001564static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
Josef Bacik96303082009-07-13 21:29:25 -04001565 struct btrfs_free_space *bitmap_info,
1566 u64 *offset, u64 *bytes)
1567{
1568 u64 end;
Josef Bacik6606bb92009-07-31 11:03:58 -04001569 u64 search_start, search_bytes;
1570 int ret;
Josef Bacik96303082009-07-13 21:29:25 -04001571
1572again:
Li Zefan34d52cb2011-03-29 13:46:06 +08001573 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
Josef Bacik96303082009-07-13 21:29:25 -04001574
Josef Bacik6606bb92009-07-31 11:03:58 -04001575 /*
Josef Bacikbdb7d302012-06-27 15:10:56 -04001576 * We need to search for bits in this bitmap. We could only cover some
1577 * of the extent in this bitmap thanks to how we add space, so we need
1578 * to search for as much as it as we can and clear that amount, and then
1579 * go searching for the next bit.
Josef Bacik6606bb92009-07-31 11:03:58 -04001580 */
1581 search_start = *offset;
Josef Bacikbdb7d302012-06-27 15:10:56 -04001582 search_bytes = ctl->unit;
Josef Bacik13dbc082011-02-03 02:39:52 +00001583 search_bytes = min(search_bytes, end - search_start + 1);
Li Zefan34d52cb2011-03-29 13:46:06 +08001584 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
Josef Bacikb50c6e22013-04-25 15:55:30 -04001585 if (ret < 0 || search_start != *offset)
1586 return -EINVAL;
Josef Bacik6606bb92009-07-31 11:03:58 -04001587
Josef Bacikbdb7d302012-06-27 15:10:56 -04001588 /* We may have found more bits than what we need */
1589 search_bytes = min(search_bytes, *bytes);
1590
1591 /* Cannot clear past the end of the bitmap */
1592 search_bytes = min(search_bytes, end - search_start + 1);
1593
1594 bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
1595 *offset += search_bytes;
1596 *bytes -= search_bytes;
Josef Bacik96303082009-07-13 21:29:25 -04001597
1598 if (*bytes) {
Josef Bacik6606bb92009-07-31 11:03:58 -04001599 struct rb_node *next = rb_next(&bitmap_info->offset_index);
Li Zefanedf6e2d2010-11-09 14:50:07 +08001600 if (!bitmap_info->bytes)
Li Zefan34d52cb2011-03-29 13:46:06 +08001601 free_bitmap(ctl, bitmap_info);
Josef Bacik96303082009-07-13 21:29:25 -04001602
Josef Bacik6606bb92009-07-31 11:03:58 -04001603 /*
1604 * no entry after this bitmap, but we still have bytes to
1605 * remove, so something has gone wrong.
1606 */
1607 if (!next)
Josef Bacik96303082009-07-13 21:29:25 -04001608 return -EINVAL;
1609
Josef Bacik6606bb92009-07-31 11:03:58 -04001610 bitmap_info = rb_entry(next, struct btrfs_free_space,
1611 offset_index);
1612
1613 /*
1614 * if the next entry isn't a bitmap we need to return to let the
1615 * extent stuff do its work.
1616 */
Josef Bacik96303082009-07-13 21:29:25 -04001617 if (!bitmap_info->bitmap)
1618 return -EAGAIN;
1619
Josef Bacik6606bb92009-07-31 11:03:58 -04001620 /*
1621 * Ok the next item is a bitmap, but it may not actually hold
1622 * the information for the rest of this free space stuff, so
1623 * look for it, and if we don't find it return so we can try
1624 * everything over again.
1625 */
1626 search_start = *offset;
Josef Bacikbdb7d302012-06-27 15:10:56 -04001627 search_bytes = ctl->unit;
Li Zefan34d52cb2011-03-29 13:46:06 +08001628 ret = search_bitmap(ctl, bitmap_info, &search_start,
Josef Bacik6606bb92009-07-31 11:03:58 -04001629 &search_bytes);
1630 if (ret < 0 || search_start != *offset)
1631 return -EAGAIN;
1632
Josef Bacik96303082009-07-13 21:29:25 -04001633 goto again;
Li Zefanedf6e2d2010-11-09 14:50:07 +08001634 } else if (!bitmap_info->bytes)
Li Zefan34d52cb2011-03-29 13:46:06 +08001635 free_bitmap(ctl, bitmap_info);
Josef Bacik96303082009-07-13 21:29:25 -04001636
1637 return 0;
1638}
1639
Josef Bacik2cdc3422011-05-27 14:07:49 -04001640static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1641 struct btrfs_free_space *info, u64 offset,
1642 u64 bytes)
1643{
1644 u64 bytes_to_set = 0;
1645 u64 end;
1646
1647 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1648
1649 bytes_to_set = min(end - offset, bytes);
1650
1651 bitmap_set_bits(ctl, info, offset, bytes_to_set);
1652
1653 return bytes_to_set;
1654
1655}
1656
Li Zefan34d52cb2011-03-29 13:46:06 +08001657static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1658 struct btrfs_free_space *info)
Josef Bacik96303082009-07-13 21:29:25 -04001659{
Li Zefan34d52cb2011-03-29 13:46:06 +08001660 struct btrfs_block_group_cache *block_group = ctl->private;
Josef Bacik96303082009-07-13 21:29:25 -04001661
1662 /*
1663 * If we are below the extents threshold then we can add this as an
1664 * extent, and don't have to deal with the bitmap
1665 */
Li Zefan34d52cb2011-03-29 13:46:06 +08001666 if (ctl->free_extents < ctl->extents_thresh) {
Josef Bacik32cb0842011-03-18 16:16:21 -04001667 /*
1668 * If this block group has some small extents we don't want to
1669 * use up all of our free slots in the cache with them, we want
1670 * to reserve them to larger extents, however if we have plent
1671 * of cache left then go ahead an dadd them, no sense in adding
1672 * the overhead of a bitmap if we don't have to.
1673 */
1674 if (info->bytes <= block_group->sectorsize * 4) {
Li Zefan34d52cb2011-03-29 13:46:06 +08001675 if (ctl->free_extents * 2 <= ctl->extents_thresh)
1676 return false;
Josef Bacik32cb0842011-03-18 16:16:21 -04001677 } else {
Li Zefan34d52cb2011-03-29 13:46:06 +08001678 return false;
Josef Bacik32cb0842011-03-18 16:16:21 -04001679 }
1680 }
Josef Bacik96303082009-07-13 21:29:25 -04001681
1682 /*
Josef Bacikdde57402013-02-12 14:07:51 -05001683 * The original block groups from mkfs can be really small, like 8
1684 * megabytes, so don't bother with a bitmap for those entries. However
1685 * some block groups can be smaller than what a bitmap would cover but
1686 * are still large enough that they could overflow the 32k memory limit,
1687 * so allow those block groups to still be allowed to have a bitmap
1688 * entry.
Josef Bacik96303082009-07-13 21:29:25 -04001689 */
Josef Bacikdde57402013-02-12 14:07:51 -05001690 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
Li Zefan34d52cb2011-03-29 13:46:06 +08001691 return false;
1692
1693 return true;
1694}
1695
Josef Bacik2cdc3422011-05-27 14:07:49 -04001696static struct btrfs_free_space_op free_space_op = {
1697 .recalc_thresholds = recalculate_thresholds,
1698 .use_bitmap = use_bitmap,
1699};
1700
Li Zefan34d52cb2011-03-29 13:46:06 +08001701static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1702 struct btrfs_free_space *info)
1703{
1704 struct btrfs_free_space *bitmap_info;
Josef Bacik2cdc3422011-05-27 14:07:49 -04001705 struct btrfs_block_group_cache *block_group = NULL;
Li Zefan34d52cb2011-03-29 13:46:06 +08001706 int added = 0;
Josef Bacik2cdc3422011-05-27 14:07:49 -04001707 u64 bytes, offset, bytes_added;
Li Zefan34d52cb2011-03-29 13:46:06 +08001708 int ret;
Josef Bacik96303082009-07-13 21:29:25 -04001709
1710 bytes = info->bytes;
1711 offset = info->offset;
1712
Li Zefan34d52cb2011-03-29 13:46:06 +08001713 if (!ctl->op->use_bitmap(ctl, info))
1714 return 0;
1715
Josef Bacik2cdc3422011-05-27 14:07:49 -04001716 if (ctl->op == &free_space_op)
1717 block_group = ctl->private;
Chris Mason38e87882011-06-10 16:36:57 -04001718again:
Josef Bacik2cdc3422011-05-27 14:07:49 -04001719 /*
1720 * Since we link bitmaps right into the cluster we need to see if we
1721 * have a cluster here, and if so and it has our bitmap we need to add
1722 * the free space to that bitmap.
1723 */
1724 if (block_group && !list_empty(&block_group->cluster_list)) {
1725 struct btrfs_free_cluster *cluster;
1726 struct rb_node *node;
1727 struct btrfs_free_space *entry;
1728
1729 cluster = list_entry(block_group->cluster_list.next,
1730 struct btrfs_free_cluster,
1731 block_group_list);
1732 spin_lock(&cluster->lock);
1733 node = rb_first(&cluster->root);
1734 if (!node) {
1735 spin_unlock(&cluster->lock);
Chris Mason38e87882011-06-10 16:36:57 -04001736 goto no_cluster_bitmap;
Josef Bacik2cdc3422011-05-27 14:07:49 -04001737 }
1738
1739 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1740 if (!entry->bitmap) {
1741 spin_unlock(&cluster->lock);
Chris Mason38e87882011-06-10 16:36:57 -04001742 goto no_cluster_bitmap;
Josef Bacik2cdc3422011-05-27 14:07:49 -04001743 }
1744
1745 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1746 bytes_added = add_bytes_to_bitmap(ctl, entry,
1747 offset, bytes);
1748 bytes -= bytes_added;
1749 offset += bytes_added;
1750 }
1751 spin_unlock(&cluster->lock);
1752 if (!bytes) {
1753 ret = 1;
1754 goto out;
1755 }
1756 }
Chris Mason38e87882011-06-10 16:36:57 -04001757
1758no_cluster_bitmap:
Li Zefan34d52cb2011-03-29 13:46:06 +08001759 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
Josef Bacik96303082009-07-13 21:29:25 -04001760 1, 0);
1761 if (!bitmap_info) {
Josef Bacikb12d6862013-08-26 17:14:08 -04001762 ASSERT(added == 0);
Josef Bacik96303082009-07-13 21:29:25 -04001763 goto new_bitmap;
1764 }
1765
Josef Bacik2cdc3422011-05-27 14:07:49 -04001766 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1767 bytes -= bytes_added;
1768 offset += bytes_added;
1769 added = 0;
Josef Bacik96303082009-07-13 21:29:25 -04001770
1771 if (!bytes) {
1772 ret = 1;
1773 goto out;
1774 } else
1775 goto again;
1776
1777new_bitmap:
1778 if (info && info->bitmap) {
Li Zefan34d52cb2011-03-29 13:46:06 +08001779 add_new_bitmap(ctl, info, offset);
Josef Bacik96303082009-07-13 21:29:25 -04001780 added = 1;
1781 info = NULL;
1782 goto again;
1783 } else {
Li Zefan34d52cb2011-03-29 13:46:06 +08001784 spin_unlock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04001785
1786 /* no pre-allocated info, allocate a new one */
1787 if (!info) {
Josef Bacikdc89e982011-01-28 17:05:48 -05001788 info = kmem_cache_zalloc(btrfs_free_space_cachep,
1789 GFP_NOFS);
Josef Bacik96303082009-07-13 21:29:25 -04001790 if (!info) {
Li Zefan34d52cb2011-03-29 13:46:06 +08001791 spin_lock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04001792 ret = -ENOMEM;
1793 goto out;
1794 }
1795 }
1796
1797 /* allocate the bitmap */
1798 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
Li Zefan34d52cb2011-03-29 13:46:06 +08001799 spin_lock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04001800 if (!info->bitmap) {
1801 ret = -ENOMEM;
1802 goto out;
1803 }
1804 goto again;
1805 }
1806
1807out:
1808 if (info) {
1809 if (info->bitmap)
1810 kfree(info->bitmap);
Josef Bacikdc89e982011-01-28 17:05:48 -05001811 kmem_cache_free(btrfs_free_space_cachep, info);
Josef Bacik96303082009-07-13 21:29:25 -04001812 }
Josef Bacik0f9dd462008-09-23 13:14:11 -04001813
1814 return ret;
1815}
1816
Chris Mason945d8962011-05-22 12:33:42 -04001817static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
Li Zefanf333adb2010-11-09 14:57:39 +08001818 struct btrfs_free_space *info, bool update_stat)
Josef Bacik0f9dd462008-09-23 13:14:11 -04001819{
Li Zefan120d66e2010-11-09 14:56:50 +08001820 struct btrfs_free_space *left_info;
1821 struct btrfs_free_space *right_info;
1822 bool merged = false;
1823 u64 offset = info->offset;
1824 u64 bytes = info->bytes;
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001825
Josef Bacik0f9dd462008-09-23 13:14:11 -04001826 /*
1827 * first we want to see if there is free space adjacent to the range we
1828 * are adding, if there is remove that struct and add a new one to
1829 * cover the entire range
1830 */
Li Zefan34d52cb2011-03-29 13:46:06 +08001831 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
Josef Bacik96303082009-07-13 21:29:25 -04001832 if (right_info && rb_prev(&right_info->offset_index))
1833 left_info = rb_entry(rb_prev(&right_info->offset_index),
1834 struct btrfs_free_space, offset_index);
1835 else
Li Zefan34d52cb2011-03-29 13:46:06 +08001836 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
Josef Bacik0f9dd462008-09-23 13:14:11 -04001837
Josef Bacik96303082009-07-13 21:29:25 -04001838 if (right_info && !right_info->bitmap) {
Li Zefanf333adb2010-11-09 14:57:39 +08001839 if (update_stat)
Li Zefan34d52cb2011-03-29 13:46:06 +08001840 unlink_free_space(ctl, right_info);
Li Zefanf333adb2010-11-09 14:57:39 +08001841 else
Li Zefan34d52cb2011-03-29 13:46:06 +08001842 __unlink_free_space(ctl, right_info);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001843 info->bytes += right_info->bytes;
Josef Bacikdc89e982011-01-28 17:05:48 -05001844 kmem_cache_free(btrfs_free_space_cachep, right_info);
Li Zefan120d66e2010-11-09 14:56:50 +08001845 merged = true;
Josef Bacik0f9dd462008-09-23 13:14:11 -04001846 }
1847
Josef Bacik96303082009-07-13 21:29:25 -04001848 if (left_info && !left_info->bitmap &&
1849 left_info->offset + left_info->bytes == offset) {
Li Zefanf333adb2010-11-09 14:57:39 +08001850 if (update_stat)
Li Zefan34d52cb2011-03-29 13:46:06 +08001851 unlink_free_space(ctl, left_info);
Li Zefanf333adb2010-11-09 14:57:39 +08001852 else
Li Zefan34d52cb2011-03-29 13:46:06 +08001853 __unlink_free_space(ctl, left_info);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001854 info->offset = left_info->offset;
1855 info->bytes += left_info->bytes;
Josef Bacikdc89e982011-01-28 17:05:48 -05001856 kmem_cache_free(btrfs_free_space_cachep, left_info);
Li Zefan120d66e2010-11-09 14:56:50 +08001857 merged = true;
Josef Bacik0f9dd462008-09-23 13:14:11 -04001858 }
1859
Li Zefan120d66e2010-11-09 14:56:50 +08001860 return merged;
1861}
1862
Li Zefan581bb052011-04-20 10:06:11 +08001863int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1864 u64 offset, u64 bytes)
Li Zefan120d66e2010-11-09 14:56:50 +08001865{
1866 struct btrfs_free_space *info;
1867 int ret = 0;
1868
Josef Bacikdc89e982011-01-28 17:05:48 -05001869 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
Li Zefan120d66e2010-11-09 14:56:50 +08001870 if (!info)
1871 return -ENOMEM;
1872
1873 info->offset = offset;
1874 info->bytes = bytes;
1875
Li Zefan34d52cb2011-03-29 13:46:06 +08001876 spin_lock(&ctl->tree_lock);
Li Zefan120d66e2010-11-09 14:56:50 +08001877
Li Zefan34d52cb2011-03-29 13:46:06 +08001878 if (try_merge_free_space(ctl, info, true))
Li Zefan120d66e2010-11-09 14:56:50 +08001879 goto link;
1880
1881 /*
1882 * There was no extent directly to the left or right of this new
1883 * extent then we know we're going to have to allocate a new extent, so
1884 * before we do that see if we need to drop this into a bitmap
1885 */
Li Zefan34d52cb2011-03-29 13:46:06 +08001886 ret = insert_into_bitmap(ctl, info);
Li Zefan120d66e2010-11-09 14:56:50 +08001887 if (ret < 0) {
1888 goto out;
1889 } else if (ret) {
1890 ret = 0;
1891 goto out;
1892 }
1893link:
Li Zefan34d52cb2011-03-29 13:46:06 +08001894 ret = link_free_space(ctl, info);
Josef Bacik0f9dd462008-09-23 13:14:11 -04001895 if (ret)
Josef Bacikdc89e982011-01-28 17:05:48 -05001896 kmem_cache_free(btrfs_free_space_cachep, info);
Josef Bacik96303082009-07-13 21:29:25 -04001897out:
Li Zefan34d52cb2011-03-29 13:46:06 +08001898 spin_unlock(&ctl->tree_lock);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001899
Josef Bacik0f9dd462008-09-23 13:14:11 -04001900 if (ret) {
Josef Bacik96303082009-07-13 21:29:25 -04001901 printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
Josef Bacikb12d6862013-08-26 17:14:08 -04001902 ASSERT(ret != -EEXIST);
Josef Bacik0f9dd462008-09-23 13:14:11 -04001903 }
1904
Josef Bacik0f9dd462008-09-23 13:14:11 -04001905 return ret;
1906}
1907
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001908int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1909 u64 offset, u64 bytes)
Josef Bacik0f9dd462008-09-23 13:14:11 -04001910{
Li Zefan34d52cb2011-03-29 13:46:06 +08001911 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Josef Bacik0f9dd462008-09-23 13:14:11 -04001912 struct btrfs_free_space *info;
Josef Bacikb0175112012-12-18 11:39:19 -05001913 int ret;
1914 bool re_search = false;
Josef Bacik0f9dd462008-09-23 13:14:11 -04001915
Li Zefan34d52cb2011-03-29 13:46:06 +08001916 spin_lock(&ctl->tree_lock);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04001917
Josef Bacik96303082009-07-13 21:29:25 -04001918again:
Josef Bacikb0175112012-12-18 11:39:19 -05001919 ret = 0;
Josef Bacikbdb7d302012-06-27 15:10:56 -04001920 if (!bytes)
1921 goto out_lock;
1922
Li Zefan34d52cb2011-03-29 13:46:06 +08001923 info = tree_search_offset(ctl, offset, 0, 0);
Josef Bacik96303082009-07-13 21:29:25 -04001924 if (!info) {
Josef Bacik6606bb92009-07-31 11:03:58 -04001925 /*
1926 * oops didn't find an extent that matched the space we wanted
1927 * to remove, look for a bitmap instead
1928 */
Li Zefan34d52cb2011-03-29 13:46:06 +08001929 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
Josef Bacik6606bb92009-07-31 11:03:58 -04001930 1, 0);
1931 if (!info) {
Josef Bacikb0175112012-12-18 11:39:19 -05001932 /*
1933 * If we found a partial bit of our free space in a
1934 * bitmap but then couldn't find the other part this may
1935 * be a problem, so WARN about it.
Chris Mason24a70312011-11-21 09:39:11 -05001936 */
Josef Bacikb0175112012-12-18 11:39:19 -05001937 WARN_ON(re_search);
Josef Bacik6606bb92009-07-31 11:03:58 -04001938 goto out_lock;
1939 }
Josef Bacik96303082009-07-13 21:29:25 -04001940 }
1941
Josef Bacikb0175112012-12-18 11:39:19 -05001942 re_search = false;
Josef Bacikbdb7d302012-06-27 15:10:56 -04001943 if (!info->bitmap) {
Li Zefan34d52cb2011-03-29 13:46:06 +08001944 unlink_free_space(ctl, info);
Josef Bacikbdb7d302012-06-27 15:10:56 -04001945 if (offset == info->offset) {
1946 u64 to_free = min(bytes, info->bytes);
Josef Bacik0f9dd462008-09-23 13:14:11 -04001947
Josef Bacikbdb7d302012-06-27 15:10:56 -04001948 info->bytes -= to_free;
1949 info->offset += to_free;
1950 if (info->bytes) {
1951 ret = link_free_space(ctl, info);
1952 WARN_ON(ret);
1953 } else {
1954 kmem_cache_free(btrfs_free_space_cachep, info);
1955 }
Josef Bacik0f9dd462008-09-23 13:14:11 -04001956
Josef Bacikbdb7d302012-06-27 15:10:56 -04001957 offset += to_free;
1958 bytes -= to_free;
1959 goto again;
1960 } else {
1961 u64 old_end = info->bytes + info->offset;
Chris Mason9b49c9b2008-09-24 11:23:25 -04001962
Josef Bacikbdb7d302012-06-27 15:10:56 -04001963 info->bytes = offset - info->offset;
Li Zefan34d52cb2011-03-29 13:46:06 +08001964 ret = link_free_space(ctl, info);
Josef Bacik96303082009-07-13 21:29:25 -04001965 WARN_ON(ret);
1966 if (ret)
1967 goto out_lock;
Josef Bacik96303082009-07-13 21:29:25 -04001968
Josef Bacikbdb7d302012-06-27 15:10:56 -04001969 /* Not enough bytes in this entry to satisfy us */
1970 if (old_end < offset + bytes) {
1971 bytes -= old_end - offset;
1972 offset = old_end;
1973 goto again;
1974 } else if (old_end == offset + bytes) {
1975 /* all done */
1976 goto out_lock;
1977 }
1978 spin_unlock(&ctl->tree_lock);
1979
1980 ret = btrfs_add_free_space(block_group, offset + bytes,
1981 old_end - (offset + bytes));
1982 WARN_ON(ret);
1983 goto out;
1984 }
Josef Bacik0f9dd462008-09-23 13:14:11 -04001985 }
Josef Bacik96303082009-07-13 21:29:25 -04001986
Li Zefan34d52cb2011-03-29 13:46:06 +08001987 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
Josef Bacikb0175112012-12-18 11:39:19 -05001988 if (ret == -EAGAIN) {
1989 re_search = true;
Josef Bacik96303082009-07-13 21:29:25 -04001990 goto again;
Josef Bacikb0175112012-12-18 11:39:19 -05001991 }
Josef Bacik96303082009-07-13 21:29:25 -04001992out_lock:
Li Zefan34d52cb2011-03-29 13:46:06 +08001993 spin_unlock(&ctl->tree_lock);
Josef Bacik0f9dd462008-09-23 13:14:11 -04001994out:
Josef Bacik25179202008-10-29 14:49:05 -04001995 return ret;
1996}
1997
Josef Bacik0f9dd462008-09-23 13:14:11 -04001998void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1999 u64 bytes)
2000{
Li Zefan34d52cb2011-03-29 13:46:06 +08002001 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Josef Bacik0f9dd462008-09-23 13:14:11 -04002002 struct btrfs_free_space *info;
2003 struct rb_node *n;
2004 int count = 0;
2005
Li Zefan34d52cb2011-03-29 13:46:06 +08002006 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
Josef Bacik0f9dd462008-09-23 13:14:11 -04002007 info = rb_entry(n, struct btrfs_free_space, offset_index);
Liu Bof6175ef2012-07-06 03:31:36 -06002008 if (info->bytes >= bytes && !block_group->ro)
Josef Bacik0f9dd462008-09-23 13:14:11 -04002009 count++;
Josef Bacik96303082009-07-13 21:29:25 -04002010 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
Geert Uytterhoevenc1c9ff72013-08-20 13:20:07 +02002011 info->offset, info->bytes,
Josef Bacik96303082009-07-13 21:29:25 -04002012 (info->bitmap) ? "yes" : "no");
Josef Bacik0f9dd462008-09-23 13:14:11 -04002013 }
Josef Bacik96303082009-07-13 21:29:25 -04002014 printk(KERN_INFO "block group has cluster?: %s\n",
2015 list_empty(&block_group->cluster_list) ? "no" : "yes");
Josef Bacik0f9dd462008-09-23 13:14:11 -04002016 printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
2017 "\n", count);
2018}
2019
Li Zefan34d52cb2011-03-29 13:46:06 +08002020void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
Josef Bacik0f9dd462008-09-23 13:14:11 -04002021{
Li Zefan34d52cb2011-03-29 13:46:06 +08002022 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Josef Bacik0f9dd462008-09-23 13:14:11 -04002023
Li Zefan34d52cb2011-03-29 13:46:06 +08002024 spin_lock_init(&ctl->tree_lock);
2025 ctl->unit = block_group->sectorsize;
2026 ctl->start = block_group->key.objectid;
2027 ctl->private = block_group;
2028 ctl->op = &free_space_op;
Josef Bacik0f9dd462008-09-23 13:14:11 -04002029
Li Zefan34d52cb2011-03-29 13:46:06 +08002030 /*
2031 * we only want to have 32k of ram per block group for keeping
2032 * track of free space, and if we pass 1/2 of that we want to
2033 * start converting things over to using bitmaps
2034 */
2035 ctl->extents_thresh = ((1024 * 32) / 2) /
2036 sizeof(struct btrfs_free_space);
Josef Bacik0f9dd462008-09-23 13:14:11 -04002037}
2038
Chris Masonfa9c0d792009-04-03 09:47:43 -04002039/*
2040 * for a given cluster, put all of its extents back into the free
2041 * space cache. If the block group passed doesn't match the block group
2042 * pointed to by the cluster, someone else raced in and freed the
2043 * cluster already. In that case, we just return without changing anything
2044 */
2045static int
2046__btrfs_return_cluster_to_free_space(
2047 struct btrfs_block_group_cache *block_group,
2048 struct btrfs_free_cluster *cluster)
2049{
Li Zefan34d52cb2011-03-29 13:46:06 +08002050 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Chris Masonfa9c0d792009-04-03 09:47:43 -04002051 struct btrfs_free_space *entry;
2052 struct rb_node *node;
2053
2054 spin_lock(&cluster->lock);
2055 if (cluster->block_group != block_group)
2056 goto out;
2057
Josef Bacik96303082009-07-13 21:29:25 -04002058 cluster->block_group = NULL;
Chris Masonfa9c0d792009-04-03 09:47:43 -04002059 cluster->window_start = 0;
Josef Bacik96303082009-07-13 21:29:25 -04002060 list_del_init(&cluster->block_group_list);
Josef Bacik96303082009-07-13 21:29:25 -04002061
Chris Masonfa9c0d792009-04-03 09:47:43 -04002062 node = rb_first(&cluster->root);
Josef Bacik96303082009-07-13 21:29:25 -04002063 while (node) {
Josef Bacik4e69b592011-03-21 10:11:24 -04002064 bool bitmap;
2065
Chris Masonfa9c0d792009-04-03 09:47:43 -04002066 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2067 node = rb_next(&entry->offset_index);
2068 rb_erase(&entry->offset_index, &cluster->root);
Josef Bacik4e69b592011-03-21 10:11:24 -04002069
2070 bitmap = (entry->bitmap != NULL);
2071 if (!bitmap)
Li Zefan34d52cb2011-03-29 13:46:06 +08002072 try_merge_free_space(ctl, entry, false);
2073 tree_insert_offset(&ctl->free_space_offset,
Josef Bacik4e69b592011-03-21 10:11:24 -04002074 entry->offset, &entry->offset_index, bitmap);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002075 }
Eric Paris6bef4d32010-02-23 19:43:04 +00002076 cluster->root = RB_ROOT;
Josef Bacik96303082009-07-13 21:29:25 -04002077
Chris Masonfa9c0d792009-04-03 09:47:43 -04002078out:
2079 spin_unlock(&cluster->lock);
Josef Bacik96303082009-07-13 21:29:25 -04002080 btrfs_put_block_group(block_group);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002081 return 0;
2082}
2083
Eric Sandeen48a3b632013-04-25 20:41:01 +00002084static void __btrfs_remove_free_space_cache_locked(
2085 struct btrfs_free_space_ctl *ctl)
Josef Bacik0f9dd462008-09-23 13:14:11 -04002086{
2087 struct btrfs_free_space *info;
2088 struct rb_node *node;
Li Zefan581bb052011-04-20 10:06:11 +08002089
Li Zefan581bb052011-04-20 10:06:11 +08002090 while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2091 info = rb_entry(node, struct btrfs_free_space, offset_index);
Josef Bacik9b90f512011-06-24 16:02:51 +00002092 if (!info->bitmap) {
2093 unlink_free_space(ctl, info);
2094 kmem_cache_free(btrfs_free_space_cachep, info);
2095 } else {
2096 free_bitmap(ctl, info);
2097 }
Li Zefan581bb052011-04-20 10:06:11 +08002098 if (need_resched()) {
2099 spin_unlock(&ctl->tree_lock);
2100 cond_resched();
2101 spin_lock(&ctl->tree_lock);
2102 }
2103 }
Chris Mason09655372011-05-21 09:27:38 -04002104}
2105
/* Empty a free space ctl, taking and releasing its tree lock. */
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
2112
Josef Bacik0f9dd462008-09-23 13:14:11 -04002113void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2114{
Li Zefan34d52cb2011-03-29 13:46:06 +08002115 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Chris Masonfa9c0d792009-04-03 09:47:43 -04002116 struct btrfs_free_cluster *cluster;
Josef Bacik96303082009-07-13 21:29:25 -04002117 struct list_head *head;
Josef Bacik0f9dd462008-09-23 13:14:11 -04002118
Li Zefan34d52cb2011-03-29 13:46:06 +08002119 spin_lock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04002120 while ((head = block_group->cluster_list.next) !=
2121 &block_group->cluster_list) {
2122 cluster = list_entry(head, struct btrfs_free_cluster,
2123 block_group_list);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002124
2125 WARN_ON(cluster->block_group != block_group);
2126 __btrfs_return_cluster_to_free_space(block_group, cluster);
Josef Bacik96303082009-07-13 21:29:25 -04002127 if (need_resched()) {
Li Zefan34d52cb2011-03-29 13:46:06 +08002128 spin_unlock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04002129 cond_resched();
Li Zefan34d52cb2011-03-29 13:46:06 +08002130 spin_lock(&ctl->tree_lock);
Josef Bacik96303082009-07-13 21:29:25 -04002131 }
Chris Masonfa9c0d792009-04-03 09:47:43 -04002132 }
Chris Mason09655372011-05-21 09:27:38 -04002133 __btrfs_remove_free_space_cache_locked(ctl);
Li Zefan34d52cb2011-03-29 13:46:06 +08002134 spin_unlock(&ctl->tree_lock);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002135
Josef Bacik0f9dd462008-09-23 13:14:11 -04002136}
2137
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002138u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
Miao Xiea4820392013-09-09 13:19:42 +08002139 u64 offset, u64 bytes, u64 empty_size,
2140 u64 *max_extent_size)
Josef Bacik0f9dd462008-09-23 13:14:11 -04002141{
Li Zefan34d52cb2011-03-29 13:46:06 +08002142 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002143 struct btrfs_free_space *entry = NULL;
Josef Bacik96303082009-07-13 21:29:25 -04002144 u64 bytes_search = bytes + empty_size;
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002145 u64 ret = 0;
David Woodhouse53b381b2013-01-29 18:40:14 -05002146 u64 align_gap = 0;
2147 u64 align_gap_len = 0;
Josef Bacik0f9dd462008-09-23 13:14:11 -04002148
Li Zefan34d52cb2011-03-29 13:46:06 +08002149 spin_lock(&ctl->tree_lock);
David Woodhouse53b381b2013-01-29 18:40:14 -05002150 entry = find_free_space(ctl, &offset, &bytes_search,
Miao Xiea4820392013-09-09 13:19:42 +08002151 block_group->full_stripe_len, max_extent_size);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002152 if (!entry)
Josef Bacik96303082009-07-13 21:29:25 -04002153 goto out;
2154
2155 ret = offset;
2156 if (entry->bitmap) {
Li Zefan34d52cb2011-03-29 13:46:06 +08002157 bitmap_clear_bits(ctl, entry, offset, bytes);
Li Zefanedf6e2d2010-11-09 14:50:07 +08002158 if (!entry->bytes)
Li Zefan34d52cb2011-03-29 13:46:06 +08002159 free_bitmap(ctl, entry);
Josef Bacik96303082009-07-13 21:29:25 -04002160 } else {
Li Zefan34d52cb2011-03-29 13:46:06 +08002161 unlink_free_space(ctl, entry);
David Woodhouse53b381b2013-01-29 18:40:14 -05002162 align_gap_len = offset - entry->offset;
2163 align_gap = entry->offset;
2164
2165 entry->offset = offset + bytes;
2166 WARN_ON(entry->bytes < bytes + align_gap_len);
2167
2168 entry->bytes -= bytes + align_gap_len;
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002169 if (!entry->bytes)
Josef Bacikdc89e982011-01-28 17:05:48 -05002170 kmem_cache_free(btrfs_free_space_cachep, entry);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002171 else
Li Zefan34d52cb2011-03-29 13:46:06 +08002172 link_free_space(ctl, entry);
Josef Bacik6226cb0a2009-04-03 10:14:18 -04002173 }
Josef Bacik96303082009-07-13 21:29:25 -04002174out:
Li Zefan34d52cb2011-03-29 13:46:06 +08002175 spin_unlock(&ctl->tree_lock);
Josef Bacik817d52f2009-07-13 21:29:25 -04002176
David Woodhouse53b381b2013-01-29 18:40:14 -05002177 if (align_gap_len)
2178 __btrfs_add_free_space(ctl, align_gap, align_gap_len);
Josef Bacik0f9dd462008-09-23 13:14:11 -04002179 return ret;
2180}
Chris Masonfa9c0d792009-04-03 09:47:43 -04002181
2182/*
2183 * given a cluster, put all of its extents back into the free space
2184 * cache. If a block group is passed, this function will only free
2185 * a cluster that belongs to the passed block group.
2186 *
2187 * Otherwise, it'll get a reference on the block group pointed to by the
2188 * cluster and remove the cluster from it.
2189 */
2190int btrfs_return_cluster_to_free_space(
2191 struct btrfs_block_group_cache *block_group,
2192 struct btrfs_free_cluster *cluster)
2193{
Li Zefan34d52cb2011-03-29 13:46:06 +08002194 struct btrfs_free_space_ctl *ctl;
Chris Masonfa9c0d792009-04-03 09:47:43 -04002195 int ret;
2196
2197 /* first, get a safe pointer to the block group */
2198 spin_lock(&cluster->lock);
2199 if (!block_group) {
2200 block_group = cluster->block_group;
2201 if (!block_group) {
2202 spin_unlock(&cluster->lock);
2203 return 0;
2204 }
2205 } else if (cluster->block_group != block_group) {
2206 /* someone else has already freed it don't redo their work */
2207 spin_unlock(&cluster->lock);
2208 return 0;
2209 }
2210 atomic_inc(&block_group->count);
2211 spin_unlock(&cluster->lock);
2212
Li Zefan34d52cb2011-03-29 13:46:06 +08002213 ctl = block_group->free_space_ctl;
2214
Chris Masonfa9c0d792009-04-03 09:47:43 -04002215 /* now return any extents the cluster had on it */
Li Zefan34d52cb2011-03-29 13:46:06 +08002216 spin_lock(&ctl->tree_lock);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002217 ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
Li Zefan34d52cb2011-03-29 13:46:06 +08002218 spin_unlock(&ctl->tree_lock);
Chris Masonfa9c0d792009-04-03 09:47:43 -04002219
2220 /* finally drop our ref */
2221 btrfs_put_block_group(block_group);
2222 return ret;
2223}
2224
/*
 * Try to carve 'bytes' out of a bitmap entry that belongs to a cluster.
 *
 * Searches the bitmap starting at 'min_start'; on success clears the
 * allocated bits and returns the logical start offset.  On failure
 * returns 0 and bumps *max_extent_size with the largest run the bitmap
 * search reported, so callers can size future requests.
 *
 * Called with cluster->lock held (NOTE(review): inferred from the sole
 * caller in this file, btrfs_alloc_from_cluster — confirm elsewhere).
 */
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start,
				   u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	/* the initializers above are immediately overridden */
	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err) {
		/* remember the biggest free run we saw before giving up */
		if (search_bytes > *max_extent_size)
			*max_extent_size = search_bytes;
		return 0;
	}

	ret = search_start;
	/* locked variant: caller already serializes against the ctl tree */
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
2252
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out.
 *
 * Walks the cluster's private rbtree of entries; extent entries are
 * trimmed in place, bitmap entries are handed to
 * btrfs_alloc_from_bitmap().  *max_extent_size is updated with the
 * largest too-small entry seen, for the benefit of the caller's retry
 * sizing.
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	/* cluster may have been stolen/retired since the caller checked */
	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while(1) {
		if (entry->bytes < bytes && entry->bytes > *max_extent_size)
			*max_extent_size = entry->bytes;

		/*
		 * Skip entries that are too small, or plain extents that
		 * start below min_start (bitmaps can satisfy min_start
		 * internally, so only extents are filtered on offset).
		 */
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start,
						      max_extent_size);
			if (ret == 0) {
				/* bitmap couldn't satisfy us; try the next entry */
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			/* plain extent: allocate from its front */
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		/* fully consumed entries leave the cluster tree here... */
		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	/*
	 * ...and are freed below under tree_lock, which also guards the
	 * ctl-wide accounting.  Note the two locks are never held together
	 * on this path.
	 */
	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
2341
/*
 * Build a cluster window out of a single bitmap entry.
 *
 * Scans the bitmap for a window holding at least 'bytes' total, with at
 * least one contiguous run of 'cont1_bytes' and no run smaller than
 * 'min_bytes'.  On success the entry is moved from the ctl's free space
 * tree into the cluster's private tree and cluster->window_start /
 * cluster->max_size are set.  Returns 0 on success, -ENOSPC otherwise.
 */
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;

	/* start scanning at 'offset', or the bitmap's start if it's later */
	i = offset_to_bit(entry->offset, ctl->unit,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, ctl->unit);
	min_bits = bytes_to_bits(min_bytes, ctl->unit);

again:
	found_bits = 0;
	/* find the next run of set bits that is at least min_bits long */
	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	/* first qualifying run anchors the window */
	if (!total_found) {
		start = i;
		cluster->max_size = 0;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * ctl->unit)
		cluster->max_size = found_bits * ctl->unit;

	/* keep accumulating runs until both size goals are met */
	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * ctl->unit + entry->offset;
	/* hand the whole bitmap entry over to the cluster's tree */
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	ASSERT(!ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * ctl->unit, 1);
	return 0;
}
2403
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 *
 * Any bitmap entries encountered along the way are appended to 'bitmaps'
 * so the caller's bitmap fallback doesn't have to re-walk the tree.
 * Returns 0 on success (entries moved into cluster->root), -ENOSPC if no
 * suitable window exists.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	/* seed the window with the first usable extent */
	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	/* grow the window rightwards, collecting stats as we go */
	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		/* skip the same entries we skipped while measuring */
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		ASSERT(!ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
2497
2498/*
2499 * This specifically looks for bitmaps that may work in the cluster, we assume
2500 * that we have already failed to find extents that will work.
2501 */
Josef Bacik3de85bb2011-05-25 13:07:37 -04002502static noinline int
2503setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2504 struct btrfs_free_cluster *cluster,
2505 struct list_head *bitmaps, u64 offset, u64 bytes,
Alexandre Oliva1bb91902011-10-14 12:10:36 -03002506 u64 cont1_bytes, u64 min_bytes)
Josef Bacik4e69b592011-03-21 10:11:24 -04002507{
Li Zefan34d52cb2011-03-29 13:46:06 +08002508 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
Josef Bacik4e69b592011-03-21 10:11:24 -04002509 struct btrfs_free_space *entry;
Josef Bacik4e69b592011-03-21 10:11:24 -04002510 int ret = -ENOSPC;
Li Zefan0f0fbf12011-11-20 07:33:38 -05002511 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
Josef Bacik4e69b592011-03-21 10:11:24 -04002512
Li Zefan34d52cb2011-03-29 13:46:06 +08002513 if (ctl->total_bitmaps == 0)
Josef Bacik4e69b592011-03-21 10:11:24 -04002514 return -ENOSPC;
2515
Josef Bacik86d4a772011-05-25 13:03:16 -04002516 /*
Li Zefan0f0fbf12011-11-20 07:33:38 -05002517 * The bitmap that covers offset won't be in the list unless offset
2518 * is just its start offset.
2519 */
2520 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2521 if (entry->offset != bitmap_offset) {
2522 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2523 if (entry && list_empty(&entry->list))
2524 list_add(&entry->list, bitmaps);
2525 }
2526
Josef Bacik86d4a772011-05-25 13:03:16 -04002527 list_for_each_entry(entry, bitmaps, list) {
Josef Bacik357b9782012-01-26 15:01:11 -05002528 if (entry->bytes < bytes)
Josef Bacik86d4a772011-05-25 13:03:16 -04002529 continue;
2530 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
Alexandre Oliva1bb91902011-10-14 12:10:36 -03002531 bytes, cont1_bytes, min_bytes);
Josef Bacik86d4a772011-05-25 13:03:16 -04002532 if (!ret)
2533 return 0;
2534 }
2535
2536 /*
Li Zefan52621cb2011-11-20 07:33:38 -05002537 * The bitmaps list has all the bitmaps that record free space
2538 * starting after offset, so no more search is required.
Josef Bacik86d4a772011-05-25 13:03:16 -04002539 */
Li Zefan52621cb2011-11-20 07:33:38 -05002540 return -ENOSPC;
Josef Bacik4e69b592011-03-21 10:11:24 -04002541}
2542
/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	/* tree_lock is taken before cluster->lock on this path */
	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	INIT_LIST_HEAD(&bitmaps);
	/* prefer plain extents; fall back to bitmap-backed clusters */
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		/* the cluster now pins the block group until it's returned */
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
2628
2629/*
2630 * simple code to zero out a cluster
2631 */
2632void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2633{
2634 spin_lock_init(&cluster->lock);
2635 spin_lock_init(&cluster->refill_lock);
Eric Paris6bef4d32010-02-23 19:43:04 +00002636 cluster->root = RB_ROOT;
Chris Masonfa9c0d792009-04-03 09:47:43 -04002637 cluster->max_size = 0;
2638 INIT_LIST_HEAD(&cluster->block_group_list);
2639 cluster->block_group = NULL;
2640}
2641
/*
 * Discard one range and return the reserved space to the free space
 * cache.
 *
 * [start, start + bytes) is the range actually discarded;
 * [reserved_start, reserved_start + reserved_bytes) is the (possibly
 * larger) range the caller removed from the cache, which is re-added
 * here afterwards.  While the discard runs, the reserved bytes are
 * accounted as reserved so allocators won't hand them out — unless the
 * block group is read-only, in which case no reservation is needed.
 *
 * *total_trimmed is incremented by the number of bytes the device
 * reported trimming.  Returns the discard result.
 */
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_error_discard_extent(fs_info->extent_root,
					 start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	/* give the space back whether or not the discard succeeded */
	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		/* group may have gone read-only while we were discarding */
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
2682
/*
 * Discard the free extent entries (not bitmaps) overlapping
 * [start, end), skipping pieces smaller than minlen.
 *
 * Each iteration takes tree_lock only long enough to find and unlink
 * one extent, then drops it before issuing the (slow) discard via
 * do_trimming().  Interruptible via fatal signals (-ERESTARTSYS).
 */
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		/* nothing left worth trimming */
		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* clamp the discard range to [start, end) */
		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		/* remove the whole extent; do_trimming() re-adds it */
		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
2755
/*
 * Discard free space recorded in bitmap entries overlapping
 * [start, end), skipping runs smaller than minlen.
 *
 * Iterates bitmap-by-bitmap (each covers BITS_PER_BITMAP * ctl->unit
 * bytes), dropping tree_lock around each do_trimming() call.
 * Interruptible via fatal signals (-ERESTARTSYS).
 */
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;

		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			/* no bitmap at this offset; move to the next one */
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		/* find the next free run of at least minlen at/after start */
		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		/* claim the run; do_trimming() re-adds it afterwards */
		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			/* crossed into the next bitmap's coverage? */
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
2826
2827int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2828 u64 *trimmed, u64 start, u64 end, u64 minlen)
2829{
2830 int ret;
2831
2832 *trimmed = 0;
2833
2834 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2835 if (ret)
2836 return ret;
2837
2838 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
Li Dongyangf7039b12011-03-24 10:24:28 +00002839
2840 return ret;
2841}
Li Zefan581bb052011-04-20 10:06:11 +08002842
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 *
 * The chosen number is also removed from the free-ino cache before it
 * is returned.  Returns 0 when the cache is empty.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		/* extent entry: take its first number and shrink it */
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		/* bitmap entry: take the lowest set bit */
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		ASSERT(!ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
Li Zefan82d59022011-04-20 10:33:24 +08002893
2894struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2895 struct btrfs_path *path)
2896{
2897 struct inode *inode = NULL;
2898
2899 spin_lock(&root->cache_lock);
2900 if (root->cache_inode)
2901 inode = igrab(root->cache_inode);
2902 spin_unlock(&root->cache_lock);
2903 if (inode)
2904 return inode;
2905
2906 inode = __lookup_free_space_inode(root, path, 0);
2907 if (IS_ERR(inode))
2908 return inode;
2909
2910 spin_lock(&root->cache_lock);
David Sterba7841cb22011-05-31 18:07:27 +02002911 if (!btrfs_fs_closing(root->fs_info))
Li Zefan82d59022011-04-20 10:33:24 +08002912 root->cache_inode = igrab(inode);
2913 spin_unlock(&root->cache_lock);
2914
2915 return inode;
2916}
2917
2918int create_free_ino_inode(struct btrfs_root *root,
2919 struct btrfs_trans_handle *trans,
2920 struct btrfs_path *path)
2921{
2922 return __create_free_space_inode(root, trans, path,
2923 BTRFS_FREE_INO_OBJECTID, 0);
2924}
2925
/*
 * Populate the root's free-ino cache from its on-disk copy.
 *
 * Returns the result of __load_free_space_cache() (number of entries
 * loaded / error), or 0 when loading is skipped: cache disabled by
 * mount option, filesystem closing, allocation failure, missing inode,
 * or a stale cache generation.
 */
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	/* a stale generation means the cache no longer matches the tree */
	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
2967
/*
 * Write the root's in-memory free-ino cache back to its cache inode.
 *
 * A best-effort operation: returns 0 when the cache is disabled or its
 * inode can't be found; on write failure it releases the delalloc
 * metadata reservation and (in DEBUG builds) logs the failure, then
 * returns the error from __btrfs_write_out_cache().
 */
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		/* the write didn't happen, so drop its space reservation */
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
#endif
	}

	iput(inode);
	return ret;
}
Josef Bacik74255aa2013-03-15 09:47:08 -04002996
2997#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04002998/*
2999 * Use this if you need to make a bitmap or extent entry specifically, it
3000 * doesn't do any of the merging that add_free_space does, this acts a lot like
3001 * how the free space cache loading stuff works, so you can get really weird
3002 * configurations.
3003 */
3004int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
3005 u64 offset, u64 bytes, bool bitmap)
Josef Bacik74255aa2013-03-15 09:47:08 -04003006{
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003007 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3008 struct btrfs_free_space *info = NULL, *bitmap_info;
3009 void *map = NULL;
3010 u64 bytes_added;
3011 int ret;
Josef Bacik74255aa2013-03-15 09:47:08 -04003012
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003013again:
3014 if (!info) {
3015 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
3016 if (!info)
3017 return -ENOMEM;
Josef Bacik74255aa2013-03-15 09:47:08 -04003018 }
3019
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003020 if (!bitmap) {
3021 spin_lock(&ctl->tree_lock);
3022 info->offset = offset;
3023 info->bytes = bytes;
3024 ret = link_free_space(ctl, info);
3025 spin_unlock(&ctl->tree_lock);
3026 if (ret)
3027 kmem_cache_free(btrfs_free_space_cachep, info);
3028 return ret;
3029 }
Josef Bacik74255aa2013-03-15 09:47:08 -04003030
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003031 if (!map) {
3032 map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
3033 if (!map) {
3034 kmem_cache_free(btrfs_free_space_cachep, info);
3035 return -ENOMEM;
3036 }
3037 }
Josef Bacik74255aa2013-03-15 09:47:08 -04003038
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003039 spin_lock(&ctl->tree_lock);
3040 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3041 1, 0);
3042 if (!bitmap_info) {
3043 info->bitmap = map;
3044 map = NULL;
3045 add_new_bitmap(ctl, info, offset);
3046 bitmap_info = info;
3047 }
Josef Bacik74255aa2013-03-15 09:47:08 -04003048
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003049 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
3050 bytes -= bytes_added;
3051 offset += bytes_added;
3052 spin_unlock(&ctl->tree_lock);
3053
3054 if (bytes)
3055 goto again;
3056
3057 if (map)
3058 kfree(map);
3059 return 0;
Josef Bacik74255aa2013-03-15 09:47:08 -04003060}
3061
3062/*
3063 * Checks to see if the given range is in the free space cache. This is really
3064 * just used to check the absence of space, so if there is free space in the
3065 * range at all we will return 1.
3066 */
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003067int test_check_exists(struct btrfs_block_group_cache *cache,
3068 u64 offset, u64 bytes)
Josef Bacik74255aa2013-03-15 09:47:08 -04003069{
3070 struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
3071 struct btrfs_free_space *info;
3072 int ret = 0;
3073
3074 spin_lock(&ctl->tree_lock);
3075 info = tree_search_offset(ctl, offset, 0, 0);
3076 if (!info) {
3077 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
3078 1, 0);
3079 if (!info)
3080 goto out;
3081 }
3082
3083have_info:
3084 if (info->bitmap) {
3085 u64 bit_off, bit_bytes;
3086 struct rb_node *n;
3087 struct btrfs_free_space *tmp;
3088
3089 bit_off = offset;
3090 bit_bytes = ctl->unit;
3091 ret = search_bitmap(ctl, info, &bit_off, &bit_bytes);
3092 if (!ret) {
3093 if (bit_off == offset) {
3094 ret = 1;
3095 goto out;
3096 } else if (bit_off > offset &&
3097 offset + bytes > bit_off) {
3098 ret = 1;
3099 goto out;
3100 }
3101 }
3102
3103 n = rb_prev(&info->offset_index);
3104 while (n) {
3105 tmp = rb_entry(n, struct btrfs_free_space,
3106 offset_index);
3107 if (tmp->offset + tmp->bytes < offset)
3108 break;
3109 if (offset + bytes < tmp->offset) {
3110 n = rb_prev(&info->offset_index);
3111 continue;
3112 }
3113 info = tmp;
3114 goto have_info;
3115 }
3116
3117 n = rb_next(&info->offset_index);
3118 while (n) {
3119 tmp = rb_entry(n, struct btrfs_free_space,
3120 offset_index);
3121 if (offset + bytes < tmp->offset)
3122 break;
3123 if (tmp->offset + tmp->bytes < offset) {
3124 n = rb_next(&info->offset_index);
3125 continue;
3126 }
3127 info = tmp;
3128 goto have_info;
3129 }
3130
3131 goto out;
3132 }
3133
3134 if (info->offset == offset) {
3135 ret = 1;
3136 goto out;
3137 }
3138
3139 if (offset > info->offset && offset < info->offset + info->bytes)
3140 ret = 1;
3141out:
3142 spin_unlock(&ctl->tree_lock);
3143 return ret;
3144}
Josef Bacikdc11dd5d2013-08-14 15:05:12 -04003145#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */