blob: 669b58201e36881fb1434d76fd787a45329f042e [file] [log] [blame]
Josef Bacik294e30f2013-10-09 12:00:56 -04001/*
2 * Copyright (C) 2013 Fusion IO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/pagemap.h>
20#include <linux/sched.h>
Omar Sandoval0f331222015-09-29 20:50:31 -070021#include <linux/slab.h>
Byongho Leeee221842015-12-15 01:42:10 +090022#include <linux/sizes.h>
Josef Bacik294e30f2013-10-09 12:00:56 -040023#include "btrfs-tests.h"
24#include "../extent_io.h"
25
26#define PROCESS_UNLOCK (1 << 0)
27#define PROCESS_RELEASE (1 << 1)
28#define PROCESS_TEST_LOCKED (1 << 2)
29
30static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
31 unsigned long flags)
32{
33 int ret;
34 struct page *pages[16];
35 unsigned long index = start >> PAGE_CACHE_SHIFT;
36 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
37 unsigned long nr_pages = end_index - index + 1;
38 int i;
39 int count = 0;
40 int loops = 0;
41
42 while (nr_pages > 0) {
43 ret = find_get_pages_contig(inode->i_mapping, index,
44 min_t(unsigned long, nr_pages,
45 ARRAY_SIZE(pages)), pages);
46 for (i = 0; i < ret; i++) {
47 if (flags & PROCESS_TEST_LOCKED &&
48 !PageLocked(pages[i]))
49 count++;
50 if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
51 unlock_page(pages[i]);
52 page_cache_release(pages[i]);
53 if (flags & PROCESS_RELEASE)
54 page_cache_release(pages[i]);
55 }
56 nr_pages -= ret;
57 index += ret;
58 cond_resched();
59 loops++;
60 if (loops > 100000) {
61 printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret);
62 break;
63 }
64 }
65 return count;
66}
67
68static int test_find_delalloc(void)
69{
70 struct inode *inode;
71 struct extent_io_tree tmp;
72 struct page *page;
73 struct page *locked_page = NULL;
74 unsigned long index = 0;
Byongho Leeee221842015-12-15 01:42:10 +090075 u64 total_dirty = SZ_256M;
76 u64 max_bytes = SZ_128M;
Josef Bacik294e30f2013-10-09 12:00:56 -040077 u64 start, end, test_start;
78 u64 found;
79 int ret = -EINVAL;
80
Omar Sandoval0f331222015-09-29 20:50:31 -070081 test_msg("Running find delalloc tests\n");
82
Josef Bacik294e30f2013-10-09 12:00:56 -040083 inode = btrfs_new_test_inode();
84 if (!inode) {
85 test_msg("Failed to allocate test inode\n");
86 return -ENOMEM;
87 }
88
89 extent_io_tree_init(&tmp, &inode->i_data);
90
91 /*
92 * First go through and create and mark all of our pages dirty, we pin
93 * everything to make sure our pages don't get evicted and screw up our
94 * test.
95 */
96 for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
David Sterba8cce83b2016-01-22 10:28:24 +010097 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
Josef Bacik294e30f2013-10-09 12:00:56 -040098 if (!page) {
99 test_msg("Failed to allocate test page\n");
100 ret = -ENOMEM;
101 goto out;
102 }
103 SetPageDirty(page);
104 if (index) {
105 unlock_page(page);
106 } else {
107 page_cache_get(page);
108 locked_page = page;
109 }
110 }
111
112 /* Test this scenario
113 * |--- delalloc ---|
114 * |--- search ---|
115 */
David Sterba8cce83b2016-01-22 10:28:24 +0100116 set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
Josef Bacik294e30f2013-10-09 12:00:56 -0400117 start = 0;
118 end = 0;
119 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
120 &end, max_bytes);
121 if (!found) {
122 test_msg("Should have found at least one delalloc\n");
123 goto out_bits;
124 }
125 if (start != 0 || end != 4095) {
126 test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n",
127 start, end);
128 goto out_bits;
129 }
130 unlock_extent(&tmp, start, end);
131 unlock_page(locked_page);
132 page_cache_release(locked_page);
133
134 /*
135 * Test this scenario
136 *
137 * |--- delalloc ---|
138 * |--- search ---|
139 */
Byongho Leeee221842015-12-15 01:42:10 +0900140 test_start = SZ_64M;
Josef Bacik294e30f2013-10-09 12:00:56 -0400141 locked_page = find_lock_page(inode->i_mapping,
142 test_start >> PAGE_CACHE_SHIFT);
143 if (!locked_page) {
144 test_msg("Couldn't find the locked page\n");
145 goto out_bits;
146 }
David Sterba8cce83b2016-01-22 10:28:24 +0100147 set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
Josef Bacik294e30f2013-10-09 12:00:56 -0400148 start = test_start;
149 end = 0;
150 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
151 &end, max_bytes);
152 if (!found) {
153 test_msg("Couldn't find delalloc in our range\n");
154 goto out_bits;
155 }
156 if (start != test_start || end != max_bytes - 1) {
157 test_msg("Expected start %Lu end %Lu, got start %Lu, end "
158 "%Lu\n", test_start, max_bytes - 1, start, end);
159 goto out_bits;
160 }
161 if (process_page_range(inode, start, end,
162 PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
163 test_msg("There were unlocked pages in the range\n");
164 goto out_bits;
165 }
166 unlock_extent(&tmp, start, end);
167 /* locked_page was unlocked above */
168 page_cache_release(locked_page);
169
170 /*
171 * Test this scenario
172 * |--- delalloc ---|
173 * |--- search ---|
174 */
175 test_start = max_bytes + 4096;
176 locked_page = find_lock_page(inode->i_mapping, test_start >>
177 PAGE_CACHE_SHIFT);
178 if (!locked_page) {
179 test_msg("Could'nt find the locked page\n");
180 goto out_bits;
181 }
182 start = test_start;
183 end = 0;
184 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
185 &end, max_bytes);
186 if (found) {
187 test_msg("Found range when we shouldn't have\n");
188 goto out_bits;
189 }
190 if (end != (u64)-1) {
191 test_msg("Did not return the proper end offset\n");
192 goto out_bits;
193 }
194
195 /*
196 * Test this scenario
197 * [------- delalloc -------|
198 * [max_bytes]|-- search--|
199 *
200 * We are re-using our test_start from above since it works out well.
201 */
David Sterba8cce83b2016-01-22 10:28:24 +0100202 set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
Josef Bacik294e30f2013-10-09 12:00:56 -0400203 start = test_start;
204 end = 0;
205 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
206 &end, max_bytes);
207 if (!found) {
208 test_msg("Didn't find our range\n");
209 goto out_bits;
210 }
211 if (start != test_start || end != total_dirty - 1) {
212 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
213 test_start, total_dirty - 1, start, end);
214 goto out_bits;
215 }
216 if (process_page_range(inode, start, end,
217 PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
218 test_msg("Pages in range were not all locked\n");
219 goto out_bits;
220 }
221 unlock_extent(&tmp, start, end);
222
223 /*
224 * Now to test where we run into a page that is no longer dirty in the
225 * range we want to find.
226 */
Byongho Leeee221842015-12-15 01:42:10 +0900227 page = find_get_page(inode->i_mapping,
228 (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT);
Josef Bacik294e30f2013-10-09 12:00:56 -0400229 if (!page) {
230 test_msg("Couldn't find our page\n");
231 goto out_bits;
232 }
233 ClearPageDirty(page);
234 page_cache_release(page);
235
236 /* We unlocked it in the previous test */
237 lock_page(locked_page);
238 start = test_start;
239 end = 0;
240 /*
241 * Currently if we fail to find dirty pages in the delalloc range we
242 * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If
243 * this changes at any point in the future we will need to fix this
244 * tests expected behavior.
245 */
246 found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
247 &end, max_bytes);
248 if (!found) {
249 test_msg("Didn't find our range\n");
250 goto out_bits;
251 }
252 if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) {
253 test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
254 test_start, test_start + PAGE_CACHE_SIZE - 1, start,
255 end);
256 goto out_bits;
257 }
258 if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
259 PROCESS_UNLOCK)) {
260 test_msg("Pages in range were not all locked\n");
261 goto out_bits;
262 }
263 ret = 0;
264out_bits:
David Sterba8cce83b2016-01-22 10:28:24 +0100265 clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
Josef Bacik294e30f2013-10-09 12:00:56 -0400266out:
267 if (locked_page)
268 page_cache_release(locked_page);
269 process_page_range(inode, 0, total_dirty - 1,
270 PROCESS_UNLOCK | PROCESS_RELEASE);
271 iput(inode);
272 return ret;
273}
274
Omar Sandoval0f331222015-09-29 20:50:31 -0700275static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
276 unsigned long len)
277{
278 unsigned long i, x;
279
280 memset(bitmap, 0, len);
281 memset_extent_buffer(eb, 0, 0, len);
282 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
283 test_msg("Bitmap was not zeroed\n");
284 return -EINVAL;
285 }
286
287 bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
288 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
289 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
290 test_msg("Setting all bits failed\n");
291 return -EINVAL;
292 }
293
294 bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
295 extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
296 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
297 test_msg("Clearing all bits failed\n");
298 return -EINVAL;
299 }
300
301 bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
302 sizeof(long) * BITS_PER_BYTE);
303 extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
304 sizeof(long) * BITS_PER_BYTE);
305 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
306 test_msg("Setting straddling pages failed\n");
307 return -EINVAL;
308 }
309
310 bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
311 bitmap_clear(bitmap,
312 (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
313 sizeof(long) * BITS_PER_BYTE);
314 extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
315 extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0,
316 sizeof(long) * BITS_PER_BYTE);
317 if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
318 test_msg("Clearing straddling pages failed\n");
319 return -EINVAL;
320 }
321
322 /*
323 * Generate a wonky pseudo-random bit pattern for the sake of not using
324 * something repetitive that could miss some hypothetical off-by-n bug.
325 */
326 x = 0;
327 for (i = 0; i < len / sizeof(long); i++) {
328 x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
329 bitmap[i] = x;
330 }
331 write_extent_buffer(eb, bitmap, 0, len);
332
333 for (i = 0; i < len * BITS_PER_BYTE; i++) {
334 int bit, bit1;
335
336 bit = !!test_bit(i, bitmap);
337 bit1 = !!extent_buffer_test_bit(eb, 0, i);
338 if (bit1 != bit) {
339 test_msg("Testing bit pattern failed\n");
340 return -EINVAL;
341 }
342
343 bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
344 i % BITS_PER_BYTE);
345 if (bit1 != bit) {
346 test_msg("Testing bit pattern with offset failed\n");
347 return -EINVAL;
348 }
349 }
350
351 return 0;
352}
353
354static int test_eb_bitmaps(void)
355{
356 unsigned long len = PAGE_CACHE_SIZE * 4;
357 unsigned long *bitmap;
358 struct extent_buffer *eb;
359 int ret;
360
361 test_msg("Running extent buffer bitmap tests\n");
362
David Sterba8cce83b2016-01-22 10:28:24 +0100363 bitmap = kmalloc(len, GFP_KERNEL);
Omar Sandoval0f331222015-09-29 20:50:31 -0700364 if (!bitmap) {
365 test_msg("Couldn't allocate test bitmap\n");
366 return -ENOMEM;
367 }
368
369 eb = __alloc_dummy_extent_buffer(NULL, 0, len);
370 if (!eb) {
371 test_msg("Couldn't allocate test extent buffer\n");
372 kfree(bitmap);
373 return -ENOMEM;
374 }
375
376 ret = __test_eb_bitmaps(bitmap, eb, len);
377 if (ret)
378 goto out;
379
380 /* Do it over again with an extent buffer which isn't page-aligned. */
381 free_extent_buffer(eb);
382 eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len);
383 if (!eb) {
384 test_msg("Couldn't allocate test extent buffer\n");
385 kfree(bitmap);
386 return -ENOMEM;
387 }
388
389 ret = __test_eb_bitmaps(bitmap, eb, len);
390out:
391 free_extent_buffer(eb);
392 kfree(bitmap);
393 return ret;
394}
395
/*
 * Entry point for the extent I/O self-tests.  Runs the delalloc test
 * first and the bitmap test only if the first succeeded; returns the
 * first non-zero result.
 */
int btrfs_test_extent_io(void)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc();
	if (!ret)
		ret = test_eb_bitmaps();

	test_msg("Extent I/O tests finished\n");
	return ret;
}