// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"

#define PROCESS_UNLOCK		(1 << 0)
#define PROCESS_RELEASE		(1 << 1)
#define PROCESS_TEST_LOCKED	(1 << 2)

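/*
 * Walk the pages in the range [start, end] and apply the requested PROCESS_*
 * actions: PROCESS_TEST_LOCKED counts pages that are unexpectedly unlocked,
 * PROCESS_UNLOCK unlocks any locked page and PROCESS_RELEASE drops an
 * additional page reference.  A non-zero return value means some pages in
 * the range were not locked when PROCESS_TEST_LOCKED was requested.
 */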
static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
					unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR
			       "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n",
			       start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

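/*
 * Dirty a large range of pages, mark various sub-ranges as delalloc in a
 * private extent_io tree and verify that find_lock_delalloc_range() returns
 * the expected [start, end] boundaries and leaves the pages in the range
 * locked.
 */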
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	u64 total_dirty = SZ_256M;
	u64 max_bytes = SZ_128M;
	u64 start, end, test_start;
	u64 found;
	int ret = -EINVAL;

	test_msg("Running find delalloc tests\n");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_msg("Failed to allocate test inode\n");
		return -ENOMEM;
	}

	extent_io_tree_init(&tmp, inode);

	/*
	 * First go through and create and mark all of our pages dirty; we pin
	 * everything to make sure our pages don't get evicted and screw up
	 * our test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_msg("Failed to allocate test page\n");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 * |--- search ---|
	 */
	set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Should have found at least one delalloc\n");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_msg("Expected start 0 end %u, got start %llu end %llu\n",
			 sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Couldn't find delalloc in our range\n");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu, end %Lu\n",
			 test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("There were unlocked pages in the range\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (found) {
		test_msg("Found range when we shouldn't have\n");
		goto out_bits;
	}
	if (end != (u64)-1) {
		test_msg("Did not return the proper end offset\n");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_msg("Couldn't find our page\n");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = 0;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, test_start + PAGE_SIZE - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	ret = 0;
out_bits:
	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

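/*
 * Verify that every bit of the memory bitmap matches the corresponding bit
 * in the extent buffer, addressed both from byte offset 0 and via the
 * byte + bit-offset form of extent_buffer_test_bit().
 */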
static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
			   unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_msg("Bits do not match\n");
			return -EINVAL;
		}

		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_msg("Offset bits do not match\n");
			return -EINVAL;
		}
	}
	return 0;
}

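/*
 * Apply the same bitmap operations to a plain memory bitmap and to the
 * extent buffer and check that the two stay in sync: set all bits, clear
 * all bits, touch a range that straddles a page boundary, and finally
 * write a pseudo-random pattern.
 */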
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, j;
	u32 x;
	int ret;

	memset(bitmap, 0, len);
	memzero_extent_buffer(eb, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Bitmap was not zeroed\n");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Setting all bits failed\n");
		return ret;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Clearing all bits failed\n");
		return ret;
	}

	/* Straddling pages test */
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_msg("Setting straddling pages failed\n");
			return ret;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		ret = check_eb_bitmap(bitmap, eb, len);
		if (ret) {
			test_msg("Clearing straddling pages failed\n");
			return ret;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
		for (j = 0; j < 32; j++) {
			if (x & (1U << j)) {
				bitmap_set(bitmap, i * 32 + j, 1);
				extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
			}
		}
	}

	ret = check_eb_bitmap(bitmap, eb, len);
	if (ret) {
		test_msg("Random bit pattern failed\n");
		return ret;
	}

	return 0;
}

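/*
 * Run the bitmap tests twice: once on a page-aligned extent buffer and once
 * on a buffer that starts half a node into the address space, to exercise
 * unaligned extent buffer offsets.
 */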
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	unsigned long len;
	unsigned long *bitmap;
	struct extent_buffer *eb;
	int ret;

	test_msg("Running extent buffer bitmap tests\n");

	/*
	 * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than
	 * BTRFS_MAX_METADATA_BLOCKSIZE.
	 */
	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
		? sectorsize * 4 : sectorsize;

	fs_info = btrfs_alloc_dummy_fs_info(len, len);
	if (!fs_info) {
		test_msg("Couldn't allocate dummy fs info\n");
		return -ENOMEM;
	}

	bitmap = kmalloc(len, GFP_KERNEL);
	if (!bitmap) {
		test_msg("Couldn't allocate test bitmap\n");
		return -ENOMEM;
	}

	eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
	if (ret)
		goto out;

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	return ret;
}

int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	test_msg("Extent I/O tests finished\n");
	return ret;
}