blob: 0f557784327ddbcc6dae344e9821789c26511c4f [file] [log] [blame]
Matthew Wilcox0a835c42016-12-20 10:27:56 -05001/*
2 * idr-test.c: Test the IDR API
3 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -050014#include <linux/bitmap.h>
Matthew Wilcox0a835c42016-12-20 10:27:56 -050015#include <linux/idr.h>
16#include <linux/slab.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19
20#include "test.h"
21
22#define DUMMY_PTR ((void *)0x12)
23
24int item_idr_free(int id, void *p, void *data)
25{
26 struct item *item = p;
27 assert(item->index == id);
28 free(p);
29
30 return 0;
31}
32
33void item_idr_remove(struct idr *idr, int id)
34{
35 struct item *item = idr_find(idr, id);
36 assert(item->index == id);
37 idr_remove(idr, id);
38 free(item);
39}
40
/*
 * Exercise idr_alloc_cyclic(): the first allocation starts at the range
 * minimum, subsequent ones continue from the internal cursor, and once
 * the top of the range is reached allocation wraps back to the minimum.
 */
void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	/* First cyclic allocation in [0, 0x4000) hands out ID 0. */
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	/* A start above the cursor overrides it. */
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	/*
	 * Allocate across the wrap point: 0x3ffe and 0x3fff come from the
	 * cursor; after that the cursor wraps to the range minimum (1),
	 * which is what the item_create(i - 0x3fff, 0) branch expects.
	 */
	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	/* Free all stored items, then tear down the tree itself. */
	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}
67
68void idr_replace_test(void)
69{
70 DEFINE_IDR(idr);
71
72 idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
73 idr_replace(&idr, &idr, 10);
74
75 idr_destroy(&idr);
76}
77
/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find() do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	/* A single NULL entry makes the IDR non-empty; removing it empties it. */
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	/* idr_destroy() must also discard a NULL entry. */
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	/* Fill IDs 0-9 with NULL entries. */
	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	/* idr_replace() distinguishes NULL values from missing entries. */
	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	/* A freed slot is reused by the next allocation. */
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	/*
	 * Remove IDs 0-8 (5 is already gone - removing an absent ID is
	 * harmless); entry 9 keeps the IDR non-empty throughout.
	 */
	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	/* Double-remove of 8 must not disturb the remaining entry. */
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	/* Same replace checks against a single-entry (ID 0) tree. */
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	/* With start == 1, IDs are handed out from 1 upwards. */
	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}
137
138void idr_nowait_test(void)
139{
140 unsigned int i;
141 DEFINE_IDR(idr);
142
143 idr_preload(GFP_KERNEL);
144
145 for (i = 0; i < 3; i++) {
146 struct item *item = item_create(i, 0);
147 assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
148 }
149
150 idr_preload_end();
151
152 idr_for_each(&idr, item_idr_free, &idr);
153 idr_destroy(&idr);
154}
155
/*
 * Populate a sparse IDR and verify idr_get_next() visits each occupied
 * ID in ascending order, for the given allocation @base.
 */
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	/* 0 terminates the list; it also serves as the 'max' for the last alloc. */
	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for(i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	/* Walk forward: each search from nextid must land on the next index. */
	for(i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}
180
Matthew Wilcox4b0ad072018-02-26 14:39:30 -0500181int idr_u32_cb(int id, void *ptr, void *data)
182{
183 BUG_ON(id < 0);
184 BUG_ON(ptr != DUMMY_PTR);
185 return 0;
186}
187
/*
 * Allocate exactly @handle with idr_alloc_u32() and verify the legacy
 * int-based interfaces (idr_get_next(), idr_for_each()) behave sanely
 * for handles above INT_MAX, which they cannot represent.
 */
void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;	/* print the warning banner only once */
	u32 id = handle;
	int sid = 0;
	void *ptr;

	/* Exact-ID allocation succeeds and leaves @id untouched... */
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	/* ...and a second attempt at the same ID reports -ENOSPC. */
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	/* IDs above INT_MAX make the int-based APIs warn; mark that window. */
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		/* Entry is invisible to the int-based iterator. */
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	/* u32-aware removal still works for any handle. */
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}
217
218void idr_u32_test(int base)
219{
220 DEFINE_IDR(idr);
221 idr_init_base(&idr, base);
222 idr_u32_test1(&idr, 10);
223 idr_u32_test1(&idr, 0x7fffffff);
224 idr_u32_test1(&idr, 0x80000000);
225 idr_u32_test1(&idr, 0x80000001);
226 idr_u32_test1(&idr, 0xffe00000);
227 idr_u32_test1(&idr, 0xffffffff);
228}
229
/*
 * Top-level IDR test driver: bulk allocation/removal, full-range errors,
 * behaviour around INT_MAX, cyclic wrap-around, and then the individual
 * sub-tests.
 */
void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	/* Sequential allocation hands out 0..9999 in order. */
	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	/* Range [5, 30) is fully occupied, so this must fail. */
	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	/* Removing an already-removed ID must be harmless. */
	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	/* Removals from an empty IDR must be harmless too. */
	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	/* Exercise freeing within a single leaf node of the tree. */
	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	/* Removing an ID far outside the tree must not corrupt it. */
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	/* Allocation succeeds right up to INT_MAX... */
	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	/* ...and any request for a range past INT_MAX reports -ENOSPC. */
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	/* Destroying twice must be safe. */
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	/* Cyclic allocation must wrap from INT_MAX back to 0. */
	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	/* With start == 1, sequential allocation begins at 1. */
	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	/* Run the focused sub-tests. */
	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
}
311
/*
 * Stub out kernel module boilerplate and route dump_stack() to assert()
 * so the in-kernel IDA test suite can be compiled straight into this
 * userspace harness via the #include below.
 */
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()    assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"
320
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500321/*
322 * Check that we get the correct error when we run out of memory doing
Matthew Wilcox06b01112018-06-18 17:06:58 -0400323 * allocations. In userspace, GFP_NOWAIT will always fail an allocation.
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500324 * The first test is for not having a bitmap available, and the second test
325 * is for not being able to allocate a level of the radix tree.
326 */
327void ida_check_nomem(void)
328{
329 DEFINE_IDA(ida);
Matthew Wilcox06b01112018-06-18 17:06:58 -0400330 int id;
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500331
Matthew Wilcox06b01112018-06-18 17:06:58 -0400332 id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
333 IDA_BUG_ON(&ida, id != -ENOMEM);
334 id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
335 IDA_BUG_ON(&ida, id != -ENOMEM);
336 IDA_BUG_ON(&ida, !ida_is_empty(&ida));
Matthew Wilcox0a835c42016-12-20 10:27:56 -0500337}
338
339/*
340 * Check what happens when we fill a leaf and then delete it. This may
341 * discover mishandling of IDR_FREE.
342 */
343void ida_check_leaf(void)
344{
345 DEFINE_IDA(ida);
346 int id;
347 unsigned long i;
348
349 for (i = 0; i < IDA_BITMAP_BITS; i++) {
350 assert(ida_pre_get(&ida, GFP_KERNEL));
351 assert(!ida_get_new(&ida, &id));
352 assert(id == i);
353 }
354
355 ida_destroy(&ida);
356 assert(ida_is_empty(&ida));
357
358 assert(ida_pre_get(&ida, GFP_KERNEL));
359 assert(!ida_get_new(&ida, &id));
360 assert(id == 0);
361 ida_destroy(&ida);
362 assert(ida_is_empty(&ida));
363}
364
/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv(void)
{
	DEFINE_IDA(ida);
	int id;
	unsigned long i;

	/*
	 * Two bits per leaf: the first fits an exceptional entry, the
	 * second forces conversion to a real bitmap; removing both must
	 * leave the IDA empty again.
	 */
	for (i = 0; i < IDA_BITMAP_BITS * 2; i += IDA_BITMAP_BITS) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new_above(&ida, i + 1, &id));
		assert(id == i + 1);
		assert(!ida_get_new_above(&ida, i + BITS_PER_LONG, &id));
		assert(id == i + BITS_PER_LONG);
		ida_remove(&ida, i + 1);
		ida_remove(&ida, i + BITS_PER_LONG);
		assert(ida_is_empty(&ida));
	}

	assert(ida_pre_get(&ida, GFP_KERNEL));

	/* Fill two whole leaves sequentially... */
	for (i = 0; i < IDA_BITMAP_BITS * 2; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	/* ...then empty them from the top down. */
	for (i = IDA_BITMAP_BITS * 2; i > 0; i--) {
		ida_remove(&ida, i - 1);
	}
	assert(ida_is_empty(&ida));

	/* Stop just short of filling the second leaf's exceptional span. */
	for (i = 0; i < IDA_BITMAP_BITS + BITS_PER_LONG - 4; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	for (i = IDA_BITMAP_BITS + BITS_PER_LONG - 4; i > 0; i--) {
		ida_remove(&ida, i - 1);
	}
	assert(ida_is_empty(&ida));

	/*
	 * With preallocated bitmaps discarded (CPU-dead callback), each
	 * exceptional->bitmap conversion point must report -EAGAIN exactly
	 * once, and succeed after ida_pre_get() replenishes the cache.
	 */
	radix_tree_cpu_dead(1);
	for (i = 0; i < 1000000; i++) {
		int err = ida_get_new(&ida, &id);
		if (err == -EAGAIN) {
			assert((i % IDA_BITMAP_BITS) == (BITS_PER_LONG - 2));
			assert(ida_pre_get(&ida, GFP_KERNEL));
			err = ida_get_new(&ida, &id);
		} else {
			assert((i % IDA_BITMAP_BITS) != (BITS_PER_LONG - 2));
		}
		assert(!err);
		assert(id == i);
	}
	ida_destroy(&ida);
}
424
/*
 * Check allocations up to and slightly above the maximum allowed (2^31-1) ID.
 * Allocating up to 2^31-1 should succeed, and then allocating the next one
 * should fail.
 */
void ida_check_max(void)
{
	DEFINE_IDA(ida);
	int id, err;
	unsigned long i, j;

	/* Approach the limit from j IDs below, for power-of-two window sizes. */
	for (j = 1; j < 65537; j *= 2) {
		unsigned long base = (1UL << 31) - j;
		/* Fill base .. 2^31-1 inclusive. */
		for (i = 0; i < j; i++) {
			assert(ida_pre_get(&ida, GFP_KERNEL));
			assert(!ida_get_new_above(&ida, base, &id));
			assert(id == base + i);
		}
		/* The next ID would be 2^31, which must be refused. */
		assert(ida_pre_get(&ida, GFP_KERNEL));
		err = ida_get_new_above(&ida, base, &id);
		assert(err == -ENOSPC);
		ida_destroy(&ida);
		assert(ida_is_empty(&ida));
		rcu_barrier();
	}
}
451
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500452void ida_check_random(void)
453{
454 DEFINE_IDA(ida);
455 DECLARE_BITMAP(bitmap, 2048);
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500456 int id, err;
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500457 unsigned int i;
458 time_t s = time(NULL);
459
460 repeat:
461 memset(bitmap, 0, sizeof(bitmap));
462 for (i = 0; i < 100000; i++) {
463 int i = rand();
464 int bit = i & 2047;
465 if (test_bit(bit, bitmap)) {
466 __clear_bit(bit, bitmap);
467 ida_remove(&ida, bit);
468 } else {
469 __set_bit(bit, bitmap);
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500470 do {
471 ida_pre_get(&ida, GFP_KERNEL);
472 err = ida_get_new_above(&ida, bit, &id);
Matthew Wilcox490645d2017-11-09 20:15:14 -0500473 } while (err == -EAGAIN);
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500474 assert(!err);
Matthew Wilcoxd37cacc2016-12-17 08:18:17 -0500475 assert(id == bit);
476 }
477 }
478 ida_destroy(&ida);
479 if (time(NULL) < s + 10)
480 goto repeat;
481}
482
Rehas Sachdeva166bb1f2017-02-20 06:40:00 -0500483void ida_simple_get_remove_test(void)
484{
485 DEFINE_IDA(ida);
486 unsigned long i;
487
488 for (i = 0; i < 10000; i++) {
489 assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
490 }
491 assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);
492
493 for (i = 0; i < 10000; i++) {
494 ida_simple_remove(&ida, i);
495 }
496 assert(ida_is_empty(&ida));
497
498 ida_destroy(&ida);
499}
500
/*
 * Driver for the userspace-only IDA checks, built around the legacy
 * ida_pre_get()/ida_get_new*() interface; finishes by invoking the
 * focused sub-tests.
 */
void user_ida_checks(void)
{
	DEFINE_IDA(ida);
	int id;
	unsigned long i;

	/* Start with discarded preallocations to exercise the slow path. */
	radix_tree_cpu_dead(1);
	ida_check_nomem();

	/* Sequential allocation hands out 0..9999. */
	for (i = 0; i < 10000; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		assert(id == i);
	}

	/* Freed IDs 20 and 21 are reused before new IDs are handed out. */
	ida_remove(&ida, 20);
	ida_remove(&ida, 21);
	for (i = 0; i < 3; i++) {
		assert(ida_pre_get(&ida, GFP_KERNEL));
		assert(!ida_get_new(&ida, &id));
		if (i == 2)
			assert(id == 10000);
	}

	for (i = 0; i < 5000; i++)
		ida_remove(&ida, i);

	/* get_new_above skips the freed low range and continues from the top. */
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 5000, &id));
	assert(id == 10001);

	ida_destroy(&ida);

	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	assert(id == 1);

	/* remove + destroy and destroy-only must both leave it empty. */
	ida_remove(&ida, id);
	assert(ida_is_empty(&ida));
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	/* Sparse IDs across multiple leaves, then partial removal + destroy. */
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1, &id));
	assert(id == 1);
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 1025, &id));
	assert(id == 1025);
	assert(ida_pre_get(&ida, GFP_KERNEL));
	assert(!ida_get_new_above(&ida, 10000, &id));
	assert(id == 10000);
	ida_remove(&ida, 1025);
	ida_destroy(&ida);
	assert(ida_is_empty(&ida));

	ida_check_leaf();
	ida_check_max();
	ida_check_conv();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}
Matthew Wilcox8ac04862016-12-18 22:56:05 -0500571
/*
 * pthread entry point: register this thread with RCU, run the random
 * IDA stress test, then unregister.  @arg is unused.
 */
static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}
579
580void ida_thread_tests(void)
581{
Matthew Wilcox490645d2017-11-09 20:15:14 -0500582 pthread_t threads[20];
Matthew Wilcox4ecd9542017-03-03 12:16:10 -0500583 int i;
584
585 for (i = 0; i < ARRAY_SIZE(threads); i++)
586 if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
587 perror("creating ida thread");
588 exit(1);
589 }
590
591 while (i--)
592 pthread_join(threads[i], NULL);
593}
594
/*
 * Run every IDA test group: the userspace-specific checks, the shared
 * in-kernel suite (from lib/test_ida.c), its teardown, and finally the
 * multithreaded stress test.
 */
void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}
602
/*
 * Standalone entry point (weak, so a combined test binary can override
 * it).  Runs the IDR and IDA suites, drains RCU, and reports any
 * leaked allocations tracked by the harness.
 */
int __weak main(void)
{
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	/* nr_allocated should be 0 if every test freed what it allocated. */
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}