blob: f609e4f1eeafe0ffa25a86790539b0b19d64f7c2 [file] [log] [blame]
Chris Wilson50f00332016-12-22 08:36:09 +00001/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
/* Tunables shared by all the subtests below. */
static unsigned int random_seed;	/* seed for DRM_RND_STATE() orderings */
static unsigned int max_iterations = 8192;	/* upper bound on per-test node count */
static unsigned int max_prime = 128;	/* upper bound for for_each_prime_number() batches */
23
/* Indices into insert_modes[] for tests that want one specific mode */
enum {
	DEFAULT,
	TOPDOWN,
	BEST,
};

/*
 * Table of search/create flag combinations exercised by the insertion
 * tests; terminated by an entry with a NULL name. evict_modes[] is the
 * subset relevant for the eviction scans.
 */
static const struct insert_mode {
	const char *name;
	unsigned int search_flags;
	unsigned int create_flags;
} insert_modes[] = {
	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
	{}
}, evict_modes[] = {
	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	{}
};
44
/* Trivial smoke test proving the selftest harness itself runs; always passes. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
50
/*
 * Check that @mm contains no holes at all: the hole iterator must yield
 * nothing, and no node may have its hole_follows flag set.
 * Returns true when hole-free, false (after pr_err) otherwise.
 */
static bool assert_no_holes(const struct drm_mm *mm)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
		count++;
	if (count) {
		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
		return false;
	}

	/* Cross-check the per-node bookkeeping against the hole iterator */
	drm_mm_for_each_node(hole, mm) {
		if (hole->hole_follows) {
			pr_err("Hole follows node, expected none!\n");
			return false;
		}
	}

	return true;
}
74
/*
 * Check that @mm contains exactly one hole and that it covers
 * [start, end). A degenerate range (end <= start) is trivially accepted.
 * Reports all violations but returns the combined verdict.
 */
static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;
	bool ok = true;

	if (end <= start)
		return true;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (start != hole_start || end != hole_end) {
			if (ok) /* only report the first mismatching hole */
				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
				       hole_start, hole_end,
				       start, end);
			ok = false;
		}
		count++;
	}
	if (count != 1) {
		pr_err("Expected to find one hole, found %lu instead\n", count);
		ok = false;
	}

	return ok;
}
103
/*
 * Check that @mm is completely filled by back-to-back nodes of @size
 * bytes each, starting at address 0, with no holes in between, and that
 * a range lookup over each node's extent finds exactly that node.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (node->hole_follows) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range iterator over [addr, addr + size) must yield
		 * exactly this node and nothing else.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/*
 * Basic sanity checks of drm_mm_init()/drm_mm_takedown(): an
 * uninitialised struct must not claim to be initialised, a fresh mm is
 * one big hole, reserving the whole range leaves no holes, and removing
 * the reservation restores the single hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison the struct first so drm_mm_init() must set every field */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_takedown(&mm);
	return ret;
}
257
Chris Wilson06df8ac2016-12-22 08:36:11 +0000258static int igt_debug(void *ignored)
259{
260 struct drm_mm mm;
261 struct drm_mm_node nodes[2];
262 int ret;
263
264 /* Create a small drm_mm with a couple of nodes and a few holes, and
265 * check that the debug iterator doesn't explode over a trivial drm_mm.
266 */
267
268 drm_mm_init(&mm, 0, 4096);
269
270 memset(nodes, 0, sizeof(nodes));
271 nodes[0].start = 512;
272 nodes[0].size = 1024;
273 ret = drm_mm_reserve_node(&mm, &nodes[0]);
274 if (ret) {
275 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
276 nodes[0].start, nodes[0].size);
277 return ret;
278 }
279
280 nodes[1].size = 1024;
281 nodes[1].start = 4096 - 512 - nodes[1].size;
282 ret = drm_mm_reserve_node(&mm, &nodes[1]);
283 if (ret) {
284 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
285 nodes[1].start, nodes[1].size);
286 return ret;
287 }
288
289 drm_mm_debug_table(&mm, __func__);
290 return 0;
291}
292
Chris Wilson900537d2016-12-22 08:36:12 +0000293static struct drm_mm_node *set_node(struct drm_mm_node *node,
294 u64 start, u64 size)
295{
296 node->start = start;
297 node->size = size;
298 return node;
299}
300
301static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
302{
303 int err;
304
305 err = drm_mm_reserve_node(mm, node);
306 if (likely(err == -ENOSPC))
307 return true;
308
309 if (!err) {
310 pr_err("impossible reserve succeeded, node %llu + %llu\n",
311 node->start, node->size);
312 drm_mm_remove_node(node);
313 } else {
314 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
315 err, -ENOSPC, node->start, node->size);
316 }
317 return false;
318}
319
/*
 * Try a table of deliberately out-of-bounds or degenerate reservations
 * against an mm spanning [0, count*size); every single one must be
 * rejected with -ENOSPC. Negative offsets rely on u64 wrap-around to
 * produce addresses beyond the end of the range.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
364
365static int __igt_reserve(unsigned int count, u64 size)
366{
367 DRM_RND_STATE(prng, random_seed);
368 struct drm_mm mm;
369 struct drm_mm_node tmp, *nodes, *node, *next;
370 unsigned int *order, n, m, o = 0;
371 int ret, err;
372
373 /* For exercising drm_mm_reserve_node(), we want to check that
374 * reservations outside of the drm_mm range are rejected, and to
375 * overlapping and otherwise already occupied ranges. Afterwards,
376 * the tree and nodes should be intact.
377 */
378
379 DRM_MM_BUG_ON(!count);
380 DRM_MM_BUG_ON(!size);
381
382 ret = -ENOMEM;
383 order = drm_random_order(count, &prng);
384 if (!order)
385 goto err;
386
387 nodes = vzalloc(sizeof(*nodes) * count);
388 if (!nodes)
389 goto err_order;
390
391 ret = -EINVAL;
392 drm_mm_init(&mm, 0, count * size);
393
394 if (!check_reserve_boundaries(&mm, count, size))
395 goto out;
396
397 for (n = 0; n < count; n++) {
398 nodes[n].start = order[n] * size;
399 nodes[n].size = size;
400
401 err = drm_mm_reserve_node(&mm, &nodes[n]);
402 if (err) {
403 pr_err("reserve failed, step %d, start %llu\n",
404 n, nodes[n].start);
405 ret = err;
406 goto out;
407 }
408
409 if (!drm_mm_node_allocated(&nodes[n])) {
410 pr_err("reserved node not allocated! step %d, start %llu\n",
411 n, nodes[n].start);
412 goto out;
413 }
414
415 if (!expect_reserve_fail(&mm, &nodes[n]))
416 goto out;
417 }
418
419 /* After random insertion the nodes should be in order */
420 if (!assert_continuous(&mm, size))
421 goto out;
422
423 /* Repeated use should then fail */
424 drm_random_reorder(order, count, &prng);
425 for (n = 0; n < count; n++) {
426 if (!expect_reserve_fail(&mm,
427 set_node(&tmp, order[n] * size, 1)))
428 goto out;
429
430 /* Remove and reinsert should work */
431 drm_mm_remove_node(&nodes[order[n]]);
432 err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
433 if (err) {
434 pr_err("reserve failed, step %d, start %llu\n",
435 n, nodes[n].start);
436 ret = err;
437 goto out;
438 }
439 }
440
441 if (!assert_continuous(&mm, size))
442 goto out;
443
444 /* Overlapping use should then fail */
445 for (n = 0; n < count; n++) {
446 if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
447 goto out;
448 }
449 for (n = 0; n < count; n++) {
450 if (!expect_reserve_fail(&mm,
451 set_node(&tmp,
452 size * n,
453 size * (count - n))))
454 goto out;
455 }
456
457 /* Remove several, reinsert, check full */
458 for_each_prime_number(n, min(max_prime, count)) {
459 for (m = 0; m < n; m++) {
460 node = &nodes[order[(o + m) % count]];
461 drm_mm_remove_node(node);
462 }
463
464 for (m = 0; m < n; m++) {
465 node = &nodes[order[(o + m) % count]];
466 err = drm_mm_reserve_node(&mm, node);
467 if (err) {
468 pr_err("reserve failed, step %d/%d, start %llu\n",
469 m, n, node->start);
470 ret = err;
471 goto out;
472 }
473 }
474
475 o += n;
476
477 if (!assert_continuous(&mm, size))
478 goto out;
479 }
480
481 ret = 0;
482out:
483 drm_mm_for_each_node_safe(node, next, &mm)
484 drm_mm_remove_node(node);
485 drm_mm_takedown(&mm);
486 vfree(nodes);
487err_order:
488 kfree(order);
489err:
490 return ret;
491}
492
493static int igt_reserve(void *ignored)
494{
495 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
496 int n, ret;
497
498 for_each_prime_number_from(n, 1, 54) {
499 u64 size = BIT_ULL(n);
500
501 ret = __igt_reserve(count, size - 1);
502 if (ret)
503 return ret;
504
505 ret = __igt_reserve(count, size);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size + 1);
510 if (ret)
511 return ret;
512 }
513
514 return 0;
515}
516
/*
 * Insert @node into @mm using the flags of @mode and verify the result
 * with assert_node(). On verification failure the node is removed again
 * so the caller's mm is left consistent. Returns true on success.
 */
static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
			  u64 size, u64 alignment, unsigned long color,
			  const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_generic(mm, node,
					 size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
		       size, alignment, color, mode->name, err);
		return false;
	}

	if (!assert_node(node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
540
541static bool expect_insert_fail(struct drm_mm *mm, u64 size)
542{
543 struct drm_mm_node tmp = {};
544 int err;
545
546 err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
547 if (likely(err == -ENOSPC))
548 return true;
549
550 if (!err) {
551 pr_err("impossible insert succeeded, node %llu + %llu\n",
552 tmp.start, tmp.size);
553 drm_mm_remove_node(&tmp);
554 } else {
555 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
556 err, -ENOSPC, size);
557 }
558 return false;
559}
560
/*
 * Exercise drm_mm_insert_node_generic() — and, when @replace is set,
 * drm_mm_replace_node() — for every insertion mode: fill an mm of
 * count*size with @count nodes of @size, then check that removal and
 * reinsertion (singly and in prime-sized random batches) refills the
 * identical layout. Returns 0 on success or a negative error code.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert a stack placeholder first
			 * and then transplant it into the real node.
			 */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
692
693static int igt_insert(void *ignored)
694{
695 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
696 unsigned int n;
697 int ret;
698
699 for_each_prime_number_from(n, 1, 54) {
700 u64 size = BIT_ULL(n);
701
Chris Wilson2bd966d2016-12-22 08:36:14 +0000702 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000703 if (ret)
704 return ret;
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size + 1, false);
711 }
712
713 return 0;
714}
715
716static int igt_replace(void *ignored)
717{
718 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
719 unsigned int n;
720 int ret;
721
722 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
723 * then replacing it with the intended node. We want to check that
724 * the tree is intact and all the information we need is carried
725 * across to the target node.
726 */
727
728 for_each_prime_number_from(n, 1, 54) {
729 u64 size = BIT_ULL(n);
730
731 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000732 if (ret)
733 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000734
735 ret = __igt_insert(count, size, true);
736 if (ret)
737 return ret;
738
739 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000740 }
741
742 return 0;
743}
744
Chris Wilson2fba0de2016-12-22 08:36:15 +0000745static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
746 u64 size, u64 alignment, unsigned long color,
747 u64 range_start, u64 range_end,
748 const struct insert_mode *mode)
749{
750 int err;
751
752 err = drm_mm_insert_node_in_range_generic(mm, node,
753 size, alignment, color,
754 range_start, range_end,
755 mode->search_flags,
756 mode->create_flags);
757 if (err) {
758 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
759 size, alignment, color, mode->name,
760 range_start, range_end, err);
761 return false;
762 }
763
764 if (!assert_node(node, mm, size, alignment, color)) {
765 drm_mm_remove_node(node);
766 return false;
767 }
768
769 return true;
770}
771
772static bool expect_insert_in_range_fail(struct drm_mm *mm,
773 u64 size,
774 u64 range_start,
775 u64 range_end)
776{
777 struct drm_mm_node tmp = {};
778 int err;
779
780 err = drm_mm_insert_node_in_range_generic(mm, &tmp,
781 size, 0, 0,
782 range_start, range_end,
783 DRM_MM_SEARCH_DEFAULT,
784 DRM_MM_CREATE_DEFAULT);
785 if (likely(err == -ENOSPC))
786 return true;
787
788 if (!err) {
789 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
790 tmp.start, tmp.size, range_start, range_end);
791 drm_mm_remove_node(&tmp);
792 } else {
793 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
794 err, -ENOSPC, size, range_start, range_end);
795 }
796
797 return false;
798}
799
/*
 * Check that [start, end) is completely packed with @size nodes: no
 * further insertion fits, nodes sit back-to-back at multiples of @size,
 * the only permitted trailing hole is outside the range, and range
 * lookups below @start or above @end find nothing.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first slot at or above start */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A hole after the last node is fine if it lies past @end */
		if (node->hole_follows && drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
857
/*
 * Like __igt_insert(), but only fill the sub-range [start, end) of an
 * mm spanning count*size: insert size-aligned nodes into the window for
 * every insertion mode, verify the window is packed, and check that
 * single removal + reinsertion restores the identical layout.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last size-aligned slot that fit inside [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
943
944static int insert_outside_range(void)
945{
946 struct drm_mm mm;
947 const unsigned int start = 1024;
948 const unsigned int end = 2048;
949 const unsigned int size = end - start;
950
951 drm_mm_init(&mm, start, size);
952
953 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
954 return -EINVAL;
955
956 if (!expect_insert_in_range_fail(&mm, size,
957 start - size/2, start + (size+1)/2))
958 return -EINVAL;
959
960 if (!expect_insert_in_range_fail(&mm, size,
961 end - (size+1)/2, end + size/2))
962 return -EINVAL;
963
964 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
965 return -EINVAL;
966
967 drm_mm_takedown(&mm);
968 return 0;
969}
970
971static int igt_insert_range(void *ignored)
972{
973 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
974 unsigned int n;
975 int ret;
976
977 /* Check that requests outside the bounds of drm_mm are rejected. */
978 ret = insert_outside_range();
979 if (ret)
980 return ret;
981
982 for_each_prime_number_from(n, 1, 50) {
983 const u64 size = BIT_ULL(n);
984 const u64 max = count * size;
985
986 ret = __igt_insert_range(count, size, 0, max);
987 if (ret)
988 return ret;
989
990 ret = __igt_insert_range(count, size, 1, max);
991 if (ret)
992 return ret;
993
994 ret = __igt_insert_range(count, size, 0, max - 1);
995 if (ret)
996 return ret;
997
998 ret = __igt_insert_range(count, size, 0, max/2);
999 if (ret)
1000 return ret;
1001
1002 ret = __igt_insert_range(count, size, max/2, max);
1003 if (ret)
1004 return ret;
1005
1006 ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1007 if (ret)
1008 return ret;
1009 }
1010
1011 return 0;
1012}
1013
/*
 * For each of the possible insertion modes, insert nodes with a series
 * of prime alignments (and prime sizes) and rely on expect_insert() /
 * assert_node() to verify each placement honours the alignment.
 */
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(max_count * sizeof(*nodes));
	if (!nodes)
		goto err;

	/* Offset range by 1/-2 so alignment is never trivially satisfied */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Start the next mode from an empty mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1065
1066static int igt_align_pot(int max)
1067{
1068 struct drm_mm mm;
1069 struct drm_mm_node *node, *next;
1070 int bit;
1071 int ret = -EINVAL;
1072
1073 /* Check that we can align to the full u64 address space */
1074
1075 drm_mm_init(&mm, 1, U64_MAX - 2);
1076
1077 for (bit = max - 1; bit; bit--) {
1078 u64 align, size;
1079
1080 node = kzalloc(sizeof(*node), GFP_KERNEL);
1081 if (!node) {
1082 ret = -ENOMEM;
1083 goto out;
1084 }
1085
1086 align = BIT_ULL(bit);
1087 size = BIT_ULL(bit-1) + 1;
1088 if (!expect_insert(&mm, node,
1089 size, align, bit,
1090 &insert_modes[0])) {
1091 pr_err("insert failed with alignment=%llx [%d]",
1092 align, bit);
1093 goto out;
1094 }
1095 }
1096
1097 ret = 0;
1098out:
1099 drm_mm_for_each_node_safe(node, next, &mm) {
1100 drm_mm_remove_node(node);
1101 kfree(node);
1102 }
1103 drm_mm_takedown(&mm);
1104 return ret;
1105}
1106
/* Power-of-two alignment checks over a 32bit address range */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1111
/* Power-of-two alignment checks over the full 64bit address range */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1116
/* Dump the parameters and hit window of an eviction scan for debugging */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1123
1124static void show_holes(const struct drm_mm *mm, int count)
1125{
1126 u64 hole_start, hole_end;
1127 struct drm_mm_node *hole;
1128
1129 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
1130 struct drm_mm_node *next = list_next_entry(hole, node_list);
1131 const char *node1 = NULL, *node2 = NULL;
1132
1133 if (hole->allocated)
1134 node1 = kasprintf(GFP_KERNEL,
1135 "[%llx + %lld, color=%ld], ",
1136 hole->start, hole->size, hole->color);
1137
1138 if (next->allocated)
1139 node2 = kasprintf(GFP_KERNEL,
1140 ", [%llx + %lld, color=%ld]",
1141 next->start, next->size, next->color);
1142
1143 pr_info("%sHole [%llx - %llx, size %lld]%s\n",
1144 node1,
1145 hole_start, hole_end, hole_end - hole_start,
1146 node2);
1147
1148 kfree(node2);
1149 kfree(node1);
1150
1151 if (!--count)
1152 break;
1153 }
1154}
1155
/* A drm_mm_node plus a list link so eviction candidates can be tracked */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1160
/*
 * Feed @nodes (optionally permuted by @order) into an eviction @scan
 * until it reports a fit, retract the candidates the scan does not
 * need, then actually remove the selected set — and, when @use_color
 * is set, also evict the neighbours the scan flagged for color
 * adjustment. On return, @evict_list holds every removed node.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Offer blocks to the scan until it claims to have found a hole */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the candidates the scanner says are not required */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Also evict neighbours flagged for color adjustment */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1207
/*
 * Run a full scan over @mm but then retract every candidate: no node
 * may end up evicted, every node must still be allocated and linked,
 * and the mm must remain continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	/* Offer everything to the scan, then take it all back */
	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		e->link.next = NULL;
	}

	/* Mark each node the mm still knows about ... */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* ... and verify none went missing from the list */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1254
/*
 * Request an eviction hole as large as the whole drm_mm, so that every
 * node must be chosen as a victim.  After evicting them all we expect a
 * single hole spanning the full range; the nodes are then reserved back
 * at their old positions to restore the original layout for the caller.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}
	/* Every node must have been selected for eviction. */
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			pr_err("Node %lld not marked for eviction!\n",
			       e->node.start);
			list_del(&e->link);
		}
	}

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Put everything back where it was. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1297
/*
 * Use the eviction scanner to carve a hole of at least @size (with
 * @alignment) out of [@range_start, @range_end), insert a fresh node into
 * that hole, verify the insertion filled it exactly, and finally restore
 * the drm_mm to its original fully-populated state.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	/* Select the victims, feeding them to the scan in @order. */
	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The freed hole must now accept the requested allocation. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The node must fill the hole exactly, leaving no trailing gap. */
	if (!assert_node(&tmp, mm, size, alignment, 0) || tmp.hole_follows) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, tmp.hole_follows);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Reinsert the evicted nodes so the caller sees an unchanged mm. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1368
/* Selftest entry point: stress drm_mm_scan-based eviction over the
 * whole address range.  Returns 0 on success, negative errno on failure.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Fill the entire range with unit-sized nodes. */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments, with sizes shrunk to keep them feasible. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1477
Chris Wilson0e483252016-12-22 08:36:18 +00001478static int igt_evict_range(void *ignored)
1479{
1480 DRM_RND_STATE(prng, random_seed);
1481 const unsigned int size = 8192;
1482 const unsigned int range_size = size / 2;
1483 const unsigned int range_start = size / 4;
1484 const unsigned int range_end = range_start + range_size;
1485 const struct insert_mode *mode;
1486 struct drm_mm mm;
1487 struct evict_node *nodes;
1488 struct drm_mm_node *node, *next;
1489 unsigned int *order, n;
1490 int ret, err;
1491
1492 /* Like igt_evict() but now we are limiting the search to a
1493 * small portion of the full drm_mm.
1494 */
1495
1496 ret = -ENOMEM;
1497 nodes = vzalloc(size * sizeof(*nodes));
1498 if (!nodes)
1499 goto err;
1500
1501 order = drm_random_order(size, &prng);
1502 if (!order)
1503 goto err_nodes;
1504
1505 ret = -EINVAL;
1506 drm_mm_init(&mm, 0, size);
1507 for (n = 0; n < size; n++) {
1508 err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
1509 DRM_MM_SEARCH_DEFAULT);
1510 if (err) {
1511 pr_err("insert failed, step %d\n", n);
1512 ret = err;
1513 goto out;
1514 }
1515 }
1516
1517 for (mode = evict_modes; mode->name; mode++) {
1518 for (n = 1; n <= range_size; n <<= 1) {
1519 drm_random_reorder(order, size, &prng);
1520 err = evict_something(&mm, range_start, range_end,
1521 nodes, order, size,
1522 n, 1,
1523 mode);
1524 if (err) {
1525 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1526 mode->name, n, range_start, range_end);
1527 goto out;
1528 }
1529 }
1530
1531 for (n = 1; n <= range_size; n <<= 1) {
1532 drm_random_reorder(order, size, &prng);
1533 err = evict_something(&mm, range_start, range_end,
1534 nodes, order, size,
1535 range_size/2, n,
1536 mode);
1537 if (err) {
1538 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1539 mode->name, range_size/2, n, range_start, range_end);
1540 goto out;
1541 }
1542 }
1543
1544 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1545 unsigned int nsize = (range_size - n + 1) / 2;
1546
1547 DRM_MM_BUG_ON(!nsize);
1548
1549 drm_random_reorder(order, size, &prng);
1550 err = evict_something(&mm, range_start, range_end,
1551 nodes, order, size,
1552 nsize, n,
1553 mode);
1554 if (err) {
1555 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1556 mode->name, nsize, n, range_start, range_end);
1557 goto out;
1558 }
1559 }
1560 }
1561
1562 ret = 0;
1563out:
1564 drm_mm_for_each_node_safe(node, next, &mm)
1565 drm_mm_remove_node(node);
1566 drm_mm_takedown(&mm);
1567 kfree(order);
1568err_nodes:
1569 vfree(nodes);
1570err:
1571 return ret;
1572}
1573
Chris Wilson05ab3c22016-12-22 08:36:19 +00001574static unsigned int node_index(const struct drm_mm_node *node)
1575{
1576 return div64_u64(node->start, node->size);
1577}
1578
1579static int igt_topdown(void *ignored)
1580{
1581 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1582 DRM_RND_STATE(prng, random_seed);
1583 const unsigned int count = 8192;
1584 unsigned int size;
1585 unsigned long *bitmap = NULL;
1586 struct drm_mm mm;
1587 struct drm_mm_node *nodes, *node, *next;
1588 unsigned int *order, n, m, o = 0;
1589 int ret;
1590
1591 /* When allocating top-down, we expect to be returned a node
1592 * from a suitable hole at the top of the drm_mm. We check that
1593 * the returned node does match the highest available slot.
1594 */
1595
1596 ret = -ENOMEM;
1597 nodes = vzalloc(count * sizeof(*nodes));
1598 if (!nodes)
1599 goto err;
1600
1601 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1602 GFP_TEMPORARY);
1603 if (!bitmap)
1604 goto err_nodes;
1605
1606 order = drm_random_order(count, &prng);
1607 if (!order)
1608 goto err_bitmap;
1609
1610 ret = -EINVAL;
1611 for (size = 1; size <= 64; size <<= 1) {
1612 drm_mm_init(&mm, 0, size*count);
1613 for (n = 0; n < count; n++) {
1614 if (!expect_insert(&mm, &nodes[n],
1615 size, 0, n,
1616 topdown)) {
1617 pr_err("insert failed, size %u step %d\n", size, n);
1618 goto out;
1619 }
1620
1621 if (nodes[n].hole_follows) {
1622 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1623 n, nodes[n].start, size);
1624 goto out;
1625 }
1626
1627 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1628 goto out;
1629 }
1630
1631 if (!assert_continuous(&mm, size))
1632 goto out;
1633
1634 drm_random_reorder(order, count, &prng);
1635 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1636 for (m = 0; m < n; m++) {
1637 node = &nodes[order[(o + m) % count]];
1638 drm_mm_remove_node(node);
1639 __set_bit(node_index(node), bitmap);
1640 }
1641
1642 for (m = 0; m < n; m++) {
1643 unsigned int last;
1644
1645 node = &nodes[order[(o + m) % count]];
1646 if (!expect_insert(&mm, node,
1647 size, 0, 0,
1648 topdown)) {
1649 pr_err("insert failed, step %d/%d\n", m, n);
1650 goto out;
1651 }
1652
1653 if (node->hole_follows) {
1654 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1655 m, n, node->start);
1656 goto out;
1657 }
1658
1659 last = find_last_bit(bitmap, count);
1660 if (node_index(node) != last) {
1661 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1662 m, n, size, last, node_index(node));
1663 goto out;
1664 }
1665
1666 __clear_bit(last, bitmap);
1667 }
1668
1669 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1670
1671 o += n;
1672 }
1673
1674 drm_mm_for_each_node_safe(node, next, &mm)
1675 drm_mm_remove_node(node);
1676 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1677 }
1678
1679 ret = 0;
1680out:
1681 drm_mm_for_each_node_safe(node, next, &mm)
1682 drm_mm_remove_node(node);
1683 drm_mm_takedown(&mm);
1684 kfree(order);
1685err_bitmap:
1686 kfree(bitmap);
1687err_nodes:
1688 vfree(nodes);
1689err:
1690 return ret;
1691}
1692
Chris Wilson4c2ba552016-12-22 08:36:20 +00001693static void separate_adjacent_colors(const struct drm_mm_node *node,
1694 unsigned long color,
1695 u64 *start,
1696 u64 *end)
1697{
1698 if (node->allocated && node->color != color)
1699 ++*start;
1700
1701 node = list_next_entry(node, node_list);
1702 if (node->allocated && node->color != color)
1703 --*end;
1704}
1705
1706static bool colors_abutt(const struct drm_mm_node *node)
1707{
1708 if (!node->hole_follows &&
1709 list_next_entry(node, node_list)->allocated) {
1710 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1711 node->color, node->start, node->size,
1712 list_next_entry(node, node_list)->color,
1713 list_next_entry(node, node_list)->start,
1714 list_next_entry(node, node_list)->size);
1715 return true;
1716 }
1717
1718 return false;
1719}
1720
/* Selftest entry point: exercise the color_adjust mechanism, first
 * checking colors are stored on insertion, then that reserve/insert
 * honour the callback-enforced gaps.  Returns 0 on success.
 */
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no color_adjust; colors must simply be stored. */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	/* Each node was inserted with color == size; verify it stuck. */
	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* Reserving flush against the previous (differently
			 * colored) node must be rejected by color_adjust.
			 */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Skip past the enforced gap and realign; now the
			 * reservation must succeed.
			 */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Phase 3: regular insertion with color + alignment. */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify colors, the enforced gaps and alignment, then
		 * drain the mm for the next mode.
		 */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1875
/*
 * Like evict_something(), but with a @color attached to the request:
 * carve a colored hole out of [@range_start, @range_end) via the
 * eviction scanner, insert a fresh node, check the color gaps were
 * honoured, then restore the original layout.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	/* Select the victims; use_color=true so evict_nodes() also
	 * collects any extra nodes evicted for color adjustment.
	 */
	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color callback must have kept a gap to our neighbours. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Reinsert the evicted nodes so the caller sees an unchanged mm. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1944
1945static int igt_color_evict(void *ignored)
1946{
1947 DRM_RND_STATE(prng, random_seed);
1948 const unsigned int total_size = min(8192u, max_iterations);
1949 const struct insert_mode *mode;
1950 unsigned long color = 0;
1951 struct drm_mm mm;
1952 struct evict_node *nodes;
1953 struct drm_mm_node *node, *next;
1954 unsigned int *order, n;
1955 int ret, err;
1956
1957 /* Check that the drm_mm_scan also honours color adjustment when
1958 * choosing its victims to create a hole. Our color_adjust does not
1959 * allow two nodes to be placed together without an intervening hole
1960 * enlarging the set of victims that must be evicted.
1961 */
1962
1963 ret = -ENOMEM;
1964 nodes = vzalloc(total_size * sizeof(*nodes));
1965 if (!nodes)
1966 goto err;
1967
1968 order = drm_random_order(total_size, &prng);
1969 if (!order)
1970 goto err_nodes;
1971
1972 ret = -EINVAL;
1973 drm_mm_init(&mm, 0, 2*total_size - 1);
1974 mm.color_adjust = separate_adjacent_colors;
1975 for (n = 0; n < total_size; n++) {
1976 if (!expect_insert(&mm, &nodes[n].node,
1977 1, 0, color++,
1978 &insert_modes[0])) {
1979 pr_err("insert failed, step %d\n", n);
1980 goto out;
1981 }
1982 }
1983
1984 for (mode = evict_modes; mode->name; mode++) {
1985 for (n = 1; n <= total_size; n <<= 1) {
1986 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00001987 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00001988 nodes, order, total_size,
1989 n, 1, color++,
1990 mode);
1991 if (err) {
1992 pr_err("%s evict_color(size=%u) failed\n",
1993 mode->name, n);
1994 goto out;
1995 }
1996 }
1997
1998 for (n = 1; n < total_size; n <<= 1) {
1999 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002000 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00002001 nodes, order, total_size,
2002 total_size/2, n, color++,
2003 mode);
2004 if (err) {
2005 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2006 mode->name, total_size/2, n);
2007 goto out;
2008 }
2009 }
2010
2011 for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2012 unsigned int nsize = (total_size - n + 1) / 2;
2013
2014 DRM_MM_BUG_ON(!nsize);
2015
2016 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002017 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00002018 nodes, order, total_size,
2019 nsize, n, color++,
2020 mode);
2021 if (err) {
2022 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2023 mode->name, nsize, n);
2024 goto out;
2025 }
2026 }
2027 }
2028
2029 ret = 0;
2030out:
2031 if (ret)
2032 drm_mm_debug_table(&mm, __func__);
2033 drm_mm_for_each_node_safe(node, next, &mm)
2034 drm_mm_remove_node(node);
2035 drm_mm_takedown(&mm);
2036 kfree(order);
2037err_nodes:
2038 vfree(nodes);
2039err:
2040 return ret;
2041}
2042
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002043static int igt_color_evict_range(void *ignored)
2044{
2045 DRM_RND_STATE(prng, random_seed);
2046 const unsigned int total_size = 8192;
2047 const unsigned int range_size = total_size / 2;
2048 const unsigned int range_start = total_size / 4;
2049 const unsigned int range_end = range_start + range_size;
2050 const struct insert_mode *mode;
2051 unsigned long color = 0;
2052 struct drm_mm mm;
2053 struct evict_node *nodes;
2054 struct drm_mm_node *node, *next;
2055 unsigned int *order, n;
2056 int ret, err;
2057
2058 /* Like igt_color_evict(), but limited to small portion of the full
2059 * drm_mm range.
2060 */
2061
2062 ret = -ENOMEM;
2063 nodes = vzalloc(total_size * sizeof(*nodes));
2064 if (!nodes)
2065 goto err;
2066
2067 order = drm_random_order(total_size, &prng);
2068 if (!order)
2069 goto err_nodes;
2070
2071 ret = -EINVAL;
2072 drm_mm_init(&mm, 0, 2*total_size - 1);
2073 mm.color_adjust = separate_adjacent_colors;
2074 for (n = 0; n < total_size; n++) {
2075 if (!expect_insert(&mm, &nodes[n].node,
2076 1, 0, color++,
2077 &insert_modes[0])) {
2078 pr_err("insert failed, step %d\n", n);
2079 goto out;
2080 }
2081 }
2082
2083 for (mode = evict_modes; mode->name; mode++) {
2084 for (n = 1; n <= range_size; n <<= 1) {
2085 drm_random_reorder(order, range_size, &prng);
2086 err = evict_color(&mm, range_start, range_end,
2087 nodes, order, total_size,
2088 n, 1, color++,
2089 mode);
2090 if (err) {
2091 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2092 mode->name, n, range_start, range_end);
2093 goto out;
2094 }
2095 }
2096
2097 for (n = 1; n < range_size; n <<= 1) {
2098 drm_random_reorder(order, total_size, &prng);
2099 err = evict_color(&mm, range_start, range_end,
2100 nodes, order, total_size,
2101 range_size/2, n, color++,
2102 mode);
2103 if (err) {
2104 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2105 mode->name, total_size/2, n, range_start, range_end);
2106 goto out;
2107 }
2108 }
2109
2110 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2111 unsigned int nsize = (range_size - n + 1) / 2;
2112
2113 DRM_MM_BUG_ON(!nsize);
2114
2115 drm_random_reorder(order, total_size, &prng);
2116 err = evict_color(&mm, range_start, range_end,
2117 nodes, order, total_size,
2118 nsize, n, color++,
2119 mode);
2120 if (err) {
2121 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2122 mode->name, nsize, n, range_start, range_end);
2123 goto out;
2124 }
2125 }
2126 }
2127
2128 ret = 0;
2129out:
2130 if (ret)
2131 drm_mm_debug_table(&mm, __func__);
2132 drm_mm_for_each_node_safe(node, next, &mm)
2133 drm_mm_remove_node(node);
2134 drm_mm_takedown(&mm);
2135 kfree(order);
2136err_nodes:
2137 vfree(nodes);
2138err:
2139 return ret;
2140}
2141
Chris Wilson50f00332016-12-22 08:36:09 +00002142#include "drm_selftest.c"
2143
2144static int __init test_drm_mm_init(void)
2145{
2146 int err;
2147
2148 while (!random_seed)
2149 random_seed = get_random_int();
2150
2151 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2152 random_seed, max_iterations, max_prime);
2153 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2154
2155 return err > 0 ? 0 : err;
2156}
2157
/* Module exit point: intentionally empty, each selftest tears down the
 * drm_mm and frees its own allocations before returning.
 */
static void __exit test_drm_mm_exit(void)
{
}
2161
2162module_init(test_drm_mm_init);
2163module_exit(test_drm_mm_exit);
2164
2165module_param(random_seed, uint, 0400);
2166module_param(max_iterations, uint, 0400);
2167module_param(max_prime, uint, 0400);
2168
2169MODULE_AUTHOR("Intel Corporation");
2170MODULE_LICENSE("GPL");