/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR ((void *)0x12)

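/*
 * Helpers shared by the tests below: item_idr_free() is an idr_for_each()
 * callback which checks that the item stored under an ID records that same
 * ID in item->index before freeing it; item_idr_remove() makes the same
 * check for a single ID before removing it from the IDR.
 */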
int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

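/*
 * Check cyclic allocation: with an upper limit of 0x4000, allocations which
 * would pass the limit should wrap around and restart from the lower bound,
 * so the allocated IDs keep matching the items' indices.
 */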
void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

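/* Check that idr_replace() swaps the pointer stored at an existing ID. */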
void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find(), do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

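/*
 * Check the preload path: after idr_preload(GFP_KERNEL), allocations made
 * with GFP_NOWAIT inside the preload section should succeed from the
 * preallocated memory.
 */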
void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

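/*
 * Check that idr_get_next() walks a sparsely populated IDR in order,
 * returning each allocated ID in turn.
 */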
void idr_get_next_test(void)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

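/*
 * Top-level IDR test: bulk allocation and removal, allocation around
 * INT_MAX, cyclic allocation with a preset cursor, and finally the
 * individual tests above.
 */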
void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test();
}

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  To ensure we run out of memory, just "forget" to preload.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id, err;

	err = ida_get_new_above(&ida, 256, &id);
	assert(err == -EAGAIN);
	err = ida_get_new_above(&ida, 1UL << 30, &id);
	assert(err == -EAGAIN);
}

/*
 * Check what happens when we fill a leaf and then delete it.  This may
 * discover mishandling of IDR_FREE.
 */
void ida_check_leaf(void)
{
	DEFINE_IDA(ida);
	int id;
	unsigned long i;

	for (i = 0; i < IDA_BITMAP_BITS; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new(&ida, &id));
	assert(id == 0);
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv(void)
{
	DEFINE_IDA(ida);
	int id;
	unsigned long i;

	for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new_above(&ida, i + 1, &id));
		assert(id == i + 1);
		assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
		assert(id == i + BITS_PER_LONG);
		ida_remove(&ida, i + 1);
		ida_remove(&ida, i + BITS_PER_LONG);
		assert(ida_is_empty(&ida));
	}

	assert(ida_pre_get(&ida, GFP_KERNEL));

	for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
		ida_remove(&ida, i - 1);
	}
	assert(ida_is_empty(&ida));

	for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
		ida_remove(&ida, i - 1);
	}
	assert(ida_is_empty(&ida));

	radix_tree_cpu_dead(1);
	for (i = 0; i < 1000000; i++) {
		int err = ida_get_new(&ida, &id);
		if (err == -EAGAIN) {
			assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
			assert(ida_pre_get(&ida, GFP_KERNEL));
			err = ida_get_new(&ida, &id);
		} else {
			assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
		}
		assert(!err);
		assert(id == i);
	}
	ida_destroy(&ida);
}

/*
 * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
 * Allocating up to 2^31-1 should succeed, and then allocating the next one
 * should fail.
 */
void ida_check_max(void)
{
	DEFINE_IDA(ida);
	int id, err;
	unsigned long i, j;

	for (j = 1; j < 65537; j *= 2) {
		unsigned long base = (1UL << 31) - j;
		for (i = 0; i < j; i++) {
			assert(ida_pre_get(&ida, GFP_KERNEL));
			assert(!ida_get_new_above(&ida, base, &id));
			assert(id == base + i);
		}
		assert(ida_pre_get(&ida, GFP_KERNEL));
		err = ida_get_new_above(&ida, base, &id);
		assert(err == -ENOSPC);
		ida_destroy(&ida);
		assert(ida_is_empty(&ida));
		rcu_barrier();
	}
}

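/*
 * Randomly set and clear IDs for about ten seconds, mirroring every
 * operation in a local bitmap and checking that ida_get_new_above()
 * returns exactly the bit the bitmap says is free.
 */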
void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	int id, err;
	unsigned int i;
	time_t s = time(NULL);

 repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int i = rand();
		int bit = i & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_remove(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			do {
				ida_pre_get(&ida, GFP_KERNEL);
				err = ida_get_new_above(&ida, bit, &id);
			} while (err == -EAGAIN);
			assert(!err);
			assert(id == bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

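/*
 * Check the ida_simple_get()/ida_simple_remove() wrappers: sequential
 * allocation, failure when the requested range is already full, and
 * removal back to empty.
 */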
void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

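/*
 * Top-level IDA test: bulk allocation with ida_pre_get()/ida_get_new(),
 * reallocation of removed IDs, allocation above a given ID, and then the
 * individual IDA tests above.
 */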
void ida_checks(void)
{
	DEFINE_IDA(ida);
	int id;
	unsigned long i;

	radix_tree_cpu_dead(1);
	ida_check_nomem();

	for (i = 0; i < 10000; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	ida_remove(&ida, 20);
	ida_remove(&ida, 21);
	for (i = 0; i < 3; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		if (i == 2)
			assert(id == 10000);
	}

	for (i = 0; i < 5000; i++)
		ida_remove(&ida, i);

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 5000, &id));
	assert(id == 10001);

	ida_destroy(&ida);

	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	assert(id == 1);

	ida_remove(&ida, id);
	assert(ida_is_empty(&ida));
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	assert(id == 1);
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1025, &id));
	assert(id == 1025);
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 10000, &id));
	assert(id == 10000);
	ida_remove(&ida, 1025);
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	ida_check_leaf();
	ida_check_max();
	ida_check_conv();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

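/*
 * Thread entry point for the concurrency test below: register with RCU,
 * run the random IDA test, then unregister.
 */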
static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

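/*
 * Run ida_check_random() in 20 threads at once.  Each thread operates on
 * its own local IDA, so this mainly exercises the shared allocation paths
 * under concurrency.
 */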
void ida_thread_tests(void)
{
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
}

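/*
 * main() is declared __weak so a larger test harness linking in this file
 * can provide its own entry point.  Run standalone, it initializes the
 * radix tree, runs the IDR and IDA checks, and reports any leaked
 * allocations via nr_allocated.
 */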
int __weak main(void)
{
	radix_tree_init();
	idr_checks();
	ida_checks();
	ida_thread_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}