/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -050014#include <linux/bitmap.h>
Matthew Wilcox0a835c42016-12-20 10:27:56 -050015#include <linux/idr.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19
20#include "test.h"
21
/* An arbitrary non-NULL pointer; stored in the IDR but never dereferenced. */
#define DUMMY_PTR	((void *)0x10)
Matthew Wilcox0a835c42016-12-20 10:27:56 -050023
24int item_idr_free(int id, void *p, void *data)
25{
26 struct item *item = p;
27 assert(item->index == id);
28 free(p);
29
30 return 0;
31}
32
33void item_idr_remove(struct idr *idr, int id)
34{
35 struct item *item = idr_find(idr, id);
36 assert(item->index == id);
37 idr_remove(idr, id);
38 free(item);
39}
40
41void idr_alloc_test(void)
42{
43 unsigned long i;
44 DEFINE_IDR(idr);
45
46 assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
47 assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
48 idr_remove(&idr, 0x3ffd);
49 idr_remove(&idr, 0);
50
51 for (i = 0x3ffe; i < 0x4003; i++) {
52 int id;
53 struct item *item;
54
55 if (i < 0x4000)
56 item = item_create(i, 0);
57 else
58 item = item_create(i - 0x3fff, 0);
59
60 id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
61 assert(id == item->index);
62 }
63
64 idr_for_each(&idr, item_idr_free, &idr);
65 idr_destroy(&idr);
66}
67
68void idr_replace_test(void)
69{
70 DEFINE_IDR(idr);
71
72 idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
73 idr_replace(&idr, &idr, 10);
74
75 idr_destroy(&idr);
76}
77
/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find() do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
84void idr_null_test(void)
85{
86 int i;
87 DEFINE_IDR(idr);
88
89 assert(idr_is_empty(&idr));
90
91 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
92 assert(!idr_is_empty(&idr));
93 idr_remove(&idr, 0);
94 assert(idr_is_empty(&idr));
95
96 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
97 assert(!idr_is_empty(&idr));
98 idr_destroy(&idr);
99 assert(idr_is_empty(&idr));
100
101 for (i = 0; i < 10; i++) {
102 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
103 }
104
105 assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
106 assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
107 assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
108 assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
109 idr_remove(&idr, 5);
110 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
111 idr_remove(&idr, 5);
112
113 for (i = 0; i < 9; i++) {
114 idr_remove(&idr, i);
115 assert(!idr_is_empty(&idr));
116 }
117 idr_remove(&idr, 8);
118 assert(!idr_is_empty(&idr));
119 idr_remove(&idr, 9);
120 assert(idr_is_empty(&idr));
121
122 assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
123 assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
124 assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
125 assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);
126
127 idr_destroy(&idr);
128 assert(idr_is_empty(&idr));
129
130 for (i = 1; i < 10; i++) {
131 assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
132 }
133
134 idr_destroy(&idr);
135 assert(idr_is_empty(&idr));
136}
137
138void idr_nowait_test(void)
139{
140 unsigned int i;
141 DEFINE_IDR(idr);
142
143 idr_preload(GFP_KERNEL);
144
145 for (i = 0; i < 3; i++) {
146 struct item *item = item_create(i, 0);
147 assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
148 }
149
150 idr_preload_end();
151
152 idr_for_each(&idr, item_idr_free, &idr);
153 idr_destroy(&idr);
154}
155
Matthew Wilcox6ce711f2017-11-30 13:45:11 -0500156void idr_get_next_test(int base)
Rehas Sachdeva2eacc792017-02-18 07:31:00 -0500157{
158 unsigned long i;
159 int nextid;
160 DEFINE_IDR(idr);
Matthew Wilcox6ce711f2017-11-30 13:45:11 -0500161 idr_init_base(&idr, base);
Rehas Sachdeva2eacc792017-02-18 07:31:00 -0500162
163 int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
164
165 for(i = 0; indices[i]; i++) {
166 struct item *item = item_create(indices[i], 0);
167 assert(idr_alloc(&idr, item, indices[i], indices[i+1],
168 GFP_KERNEL) == indices[i]);
169 }
170
171 for(i = 0, nextid = 0; indices[i]; i++) {
172 idr_get_next(&idr, &nextid);
173 assert(nextid == indices[i]);
174 nextid++;
175 }
176
177 idr_for_each(&idr, item_idr_free, &idr);
178 idr_destroy(&idr);
179}
180
Matthew Wilcox4b0ad072018-02-26 14:39:30 -0500181int idr_u32_cb(int id, void *ptr, void *data)
182{
183 BUG_ON(id < 0);
184 BUG_ON(ptr != DUMMY_PTR);
185 return 0;
186}
187
188void idr_u32_test1(struct idr *idr, u32 handle)
189{
190 static bool warned = false;
191 u32 id = handle;
192 int sid = 0;
193 void *ptr;
194
195 BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
196 BUG_ON(id != handle);
197 BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
198 BUG_ON(id != handle);
199 if (!warned && id > INT_MAX)
200 printk("vvv Ignore these warnings\n");
201 ptr = idr_get_next(idr, &sid);
202 if (id > INT_MAX) {
203 BUG_ON(ptr != NULL);
204 BUG_ON(sid != 0);
205 } else {
206 BUG_ON(ptr != DUMMY_PTR);
207 BUG_ON(sid != id);
208 }
209 idr_for_each(idr, idr_u32_cb, NULL);
210 if (!warned && id > INT_MAX) {
211 printk("^^^ Warnings over\n");
212 warned = true;
213 }
214 BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
215 BUG_ON(!idr_is_empty(idr));
216}
217
218void idr_u32_test(int base)
219{
220 DEFINE_IDR(idr);
221 idr_init_base(&idr, base);
222 idr_u32_test1(&idr, 10);
223 idr_u32_test1(&idr, 0x7fffffff);
224 idr_u32_test1(&idr, 0x80000000);
225 idr_u32_test1(&idr, 0x80000001);
226 idr_u32_test1(&idr, 0xffe00000);
227 idr_u32_test1(&idr, 0xffffffff);
228}
229
Matthew Wilcox66ee6202018-06-25 06:56:50 -0400230static void idr_align_test(struct idr *idr)
231{
232 char name[] = "Motorola 68000";
233 int i, id;
234 void *entry;
235
236 for (i = 0; i < 9; i++) {
237 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
238 idr_for_each_entry(idr, entry, id);
239 }
240 idr_destroy(idr);
241
242 for (i = 1; i < 10; i++) {
243 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
244 idr_for_each_entry(idr, entry, id);
245 }
246 idr_destroy(idr);
247
248 for (i = 2; i < 11; i++) {
249 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
250 idr_for_each_entry(idr, entry, id);
251 }
252 idr_destroy(idr);
253
254 for (i = 3; i < 12; i++) {
255 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
256 idr_for_each_entry(idr, entry, id);
257 }
258 idr_destroy(idr);
259
260 for (i = 0; i < 8; i++) {
261 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
262 BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
263 idr_for_each_entry(idr, entry, id);
264 idr_remove(idr, 1);
265 idr_for_each_entry(idr, entry, id);
266 idr_remove(idr, 0);
267 BUG_ON(!idr_is_empty(idr));
268 }
269
270 for (i = 0; i < 8; i++) {
271 BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
272 idr_for_each_entry(idr, entry, id);
273 idr_replace(idr, &name[i], 0);
274 idr_for_each_entry(idr, entry, id);
275 BUG_ON(idr_find(idr, 0) != &name[i]);
276 idr_remove(idr, 0);
277 }
278
279 for (i = 0; i < 8; i++) {
280 BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
281 BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
282 idr_remove(idr, 1);
283 idr_for_each_entry(idr, entry, id);
284 idr_replace(idr, &name[i + 1], 0);
285 idr_for_each_entry(idr, entry, id);
286 idr_remove(idr, 0);
287 }
288}
289
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500290void idr_checks(void)
291{
292 unsigned long i;
293 DEFINE_IDR(idr);
294
295 for (i = 0; i < 10000; i++) {
296 struct item *item = item_create(i, 0);
297 assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
298 }
299
300 assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);
301
302 for (i = 0; i < 5000; i++)
303 item_idr_remove(&idr, i);
304
305 idr_remove(&idr, 3);
306
307 idr_for_each(&idr, item_idr_free, &idr);
308 idr_destroy(&idr);
309
310 assert(idr_is_empty(&idr));
311
312 idr_remove(&idr, 3);
313 idr_remove(&idr, 0);
314
Matthew Wilcox7a4deea2018-05-25 14:47:24 -0700315 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
316 idr_remove(&idr, 1);
317 for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
318 assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
319 idr_remove(&idr, 1 << 30);
320 idr_destroy(&idr);
321
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500322 for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
323 struct item *item = item_create(i, 0);
324 assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
325 }
326 assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
Matthew Wilcox6e6d3012017-11-28 14:27:14 -0500327 assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500328
329 idr_for_each(&idr, item_idr_free, &idr);
330 idr_destroy(&idr);
331 idr_destroy(&idr);
332
333 assert(idr_is_empty(&idr));
334
Matthew Wilcox460488c2017-11-28 15:16:24 -0500335 idr_set_cursor(&idr, INT_MAX - 3UL);
336 for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
337 struct item *item;
338 unsigned int id;
339 if (i <= INT_MAX)
340 item = item_create(i, 0);
341 else
342 item = item_create(i - INT_MAX - 1, 0);
343
344 id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
345 assert(id == item->index);
346 }
347
348 idr_for_each(&idr, item_idr_free, &idr);
349 idr_destroy(&idr);
350 assert(idr_is_empty(&idr));
351
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500352 for (i = 1; i < 10000; i++) {
353 struct item *item = item_create(i, 0);
354 assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
355 }
356
357 idr_for_each(&idr, item_idr_free, &idr);
358 idr_destroy(&idr);
359
360 idr_replace_test();
361 idr_alloc_test();
362 idr_null_test();
363 idr_nowait_test();
Matthew Wilcox6ce711f2017-11-30 13:45:11 -0500364 idr_get_next_test(0);
365 idr_get_next_test(1);
366 idr_get_next_test(4);
Matthew Wilcox4b0ad072018-02-26 14:39:30 -0500367 idr_u32_test(4);
368 idr_u32_test(1);
369 idr_u32_test(0);
Matthew Wilcox66ee6202018-06-25 06:56:50 -0400370 idr_align_test(&idr);
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500371}
372
Matthew Wilcox8ab8ba32018-06-18 16:59:29 -0400373#define module_init(x)
374#define module_exit(x)
375#define MODULE_AUTHOR(x)
376#define MODULE_LICENSE(x)
377#define dump_stack() assert(0)
378void ida_dump(struct ida *);
379
380#include "../../../lib/test_ida.c"
381
/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
388void ida_check_nomem(void)
389{
390 DEFINE_IDA(ida);
Matthew Wilcox06b01112018-06-18 17:06:58 -0400391 int id;
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500392
Matthew Wilcox06b01112018-06-18 17:06:58 -0400393 id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
394 IDA_BUG_ON(&ida, id != -ENOMEM);
395 id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
396 IDA_BUG_ON(&ida, id != -ENOMEM);
397 IDA_BUG_ON(&ida, !ida_is_empty(&ida));
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500398}
399
/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
Matthew Wilcox5c78b0b2018-06-18 18:10:32 -0400403void ida_check_conv_user(void)
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500404{
405 DEFINE_IDA(ida);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500406 unsigned long i;
407
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500408 for (i = 0; i < 1000000; i++) {
Matthew Wilcox5c78b0b2018-06-18 18:10:32 -0400409 int id = ida_alloc(&ida, GFP_NOWAIT);
410 if (id == -ENOMEM) {
Matthew Wilcoxf32f0042018-07-04 15:42:46 -0400411 IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
412 BITS_PER_XA_VALUE) &&
413 ((i % IDA_BITMAP_BITS) != 0));
Matthew Wilcox5c78b0b2018-06-18 18:10:32 -0400414 id = ida_alloc(&ida, GFP_KERNEL);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500415 } else {
Matthew Wilcox5c78b0b2018-06-18 18:10:32 -0400416 IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
Matthew Wilcox3159f942017-11-03 13:30:42 -0400417 BITS_PER_XA_VALUE);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500418 }
Matthew Wilcox5c78b0b2018-06-18 18:10:32 -0400419 IDA_BUG_ON(&ida, id != i);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500420 }
421 ida_destroy(&ida);
422}
423
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500424void ida_check_random(void)
425{
426 DEFINE_IDA(ida);
427 DECLARE_BITMAP(bitmap, 2048);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500428 unsigned int i;
429 time_t s = time(NULL);
430
431 repeat:
432 memset(bitmap, 0, sizeof(bitmap));
433 for (i = 0; i < 100000; i++) {
434 int i = rand();
435 int bit = i & 2047;
436 if (test_bit(bit, bitmap)) {
437 __clear_bit(bit, bitmap);
Matthew Wilcoxf2726682018-06-18 18:39:28 -0400438 ida_free(&ida, bit);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500439 } else {
440 __set_bit(bit, bitmap);
Matthew Wilcoxf2726682018-06-18 18:39:28 -0400441 IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
442 != bit);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500443 }
444 }
445 ida_destroy(&ida);
446 if (time(NULL) < s + 10)
447 goto repeat;
448}
449
Rehas Sachdeva166bb1f2017-02-20 06:40:00 -0500450void ida_simple_get_remove_test(void)
451{
452 DEFINE_IDA(ida);
453 unsigned long i;
454
455 for (i = 0; i < 10000; i++) {
456 assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
457 }
458 assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
459
460 for (i = 0; i < 10000; i++) {
461 ida_simple_remove(&ida, i);
462 }
463 assert(ida_is_empty(&ida));
464
465 ida_destroy(&ida);
466}
467
/* Userspace-only IDA tests, bracketed by CPU-dead callbacks to flush the
 * preload pools before and after. */
void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}
Matthew Wilcox8ac04862016-12-18 22:56:05 -0500479
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500480static void *ida_random_fn(void *arg)
481{
482 rcu_register_thread();
483 ida_check_random();
484 rcu_unregister_thread();
485 return NULL;
486}
487
488void ida_thread_tests(void)
489{
Matthew Wilcox490645d2017-11-09 20:15:14 -0500490 pthread_t threads[20];
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500491 int i;
492
493 for (i = 0; i < ARRAY_SIZE(threads); i++)
494 if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
495 perror("creating ida thread");
496 exit(1);
497 }
498
499 while (i--)
500 pthread_join(threads[i], NULL);
501}
502
/* All IDA tests: userspace harness checks, the in-kernel self-test
 * (ida_checks/ida_exit come from lib/test_ida.c), then threaded stress. */
void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}
510
Matthew Wilcox8ac04862016-12-18 22:56:05 -0500511int __weak main(void)
512{
513 radix_tree_init();
514 idr_checks();
Matthew Wilcox8ab8ba32018-06-18 16:59:29 -0400515 ida_tests();
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500516 radix_tree_cpu_dead(1);
Matthew Wilcox8ac04862016-12-18 22:56:05 -0500517 rcu_barrier();
518 if (nr_allocated)
519 printf("nr_allocated = %d\n", nr_allocated);
520 return 0;
521}