blob: 6df53e6c130841beff942a1d2b29aca6606e6982 [file] [log] [blame]
Chris Wilson50f00332016-12-22 08:36:09 +00001/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
20static unsigned int random_seed;
21static unsigned int max_iterations = 8192;
22static unsigned int max_prime = 128;
23
/* Indices into insert_modes[] for tests that want one specific mode. */
enum {
	DEFAULT,
	TOPDOWN,
	BEST,
};

/*
 * Search/create flag combinations handed to the drm_mm insertion API.
 * Both tables are terminated by a zeroed sentinel (name == NULL) so the
 * tests can simply iterate until mode->name is NULL.
 */
static const struct insert_mode {
	const char *name;
	unsigned int search_flags;
	unsigned int create_flags;
} insert_modes[] = {
	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
	{}
}, evict_modes[] = {
	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	{}
};
44
/* Smoke test: proves the selftest harness itself runs and reports. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
50
/*
 * Check that @mm contains no holes at all: the hole iterator must visit
 * nothing, and no node may be flagged as being followed by a hole.
 * Returns true if the mm is completely packed.
 */
static bool assert_no_holes(const struct drm_mm *mm)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
		count++;
	if (count) {
		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
		return false;
	}

	/* Cross-check against the per-node hole-follows flag. */
	drm_mm_for_each_node(hole, mm) {
		if (drm_mm_hole_follows(hole)) {
			pr_err("Hole follows node, expected none!\n");
			return false;
		}
	}

	return true;
}
74
/*
 * Check that @mm contains exactly one hole covering [start, end).
 * An empty interval (end <= start) is trivially accepted. All mismatches
 * are logged before returning false.
 */
static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;
	bool ok = true;

	if (end <= start)
		return true;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (start != hole_start || end != hole_end) {
			/* Only log the first mismatch to avoid log spam. */
			if (ok)
				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
				       hole_start, hole_end,
				       start, end);
			ok = false;
		}
		count++;
	}
	if (count != 1) {
		pr_err("Expected to find one hole, found %lu instead\n", count);
		ok = false;
	}

	return ok;
}
103
/*
 * Check that @mm is one gapless run of equally sized nodes: node n starts
 * at n * @size, has exactly @size bytes, is not followed by a hole, and is
 * returned by a range lookup over its own extent.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range lookup over [addr, addr+size) must yield only this node. */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/* Dump the contents of @mm via drm_debug_printer, tagged with the caller. */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
200
/*
 * Basic lifecycle checks: a zeroed struct is not "initialized";
 * drm_mm_init() yields a clean mm whose whole range is one hole;
 * reserving the full range removes the hole; removing the node brings
 * it back.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first, to prove drm_mm_init() sets everything it needs. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
261
static int igt_debug(void *ignored)
{
	struct drm_mm mm;
	struct drm_mm_node nodes[2];
	int ret;

	/* Create a small drm_mm with a couple of nodes and a few holes, and
	 * check that the debug iterator doesn't explode over a trivial drm_mm.
	 */

	drm_mm_init(&mm, 0, 4096);

	memset(nodes, 0, sizeof(nodes));
	nodes[0].start = 512;
	nodes[0].size = 1024;
	ret = drm_mm_reserve_node(&mm, &nodes[0]);
	if (ret) {
		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
		       nodes[0].start, nodes[0].size);
		return ret;
	}

	/* Mirror the first node at the top, leaving holes at 0, middle, end. */
	nodes[1].size = 1024;
	nodes[1].start = 4096 - 512 - nodes[1].size;
	ret = drm_mm_reserve_node(&mm, &nodes[1]);
	if (ret) {
		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
		       nodes[1].start, nodes[1].size);
		return ret;
	}

	/* Dump the layout; we only care that this does not crash. */
	show_mm(&mm);
	return 0;
}
296
Chris Wilson900537d2016-12-22 08:36:12 +0000297static struct drm_mm_node *set_node(struct drm_mm_node *node,
298 u64 start, u64 size)
299{
300 node->start = start;
301 node->size = size;
302 return node;
303}
304
/*
 * Try a reservation that must be rejected with -ENOSPC. An unexpected
 * success is unwound (the node is removed again) so the mm is left
 * unchanged either way. Returns true only for the expected failure.
 */
static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
{
	int err;

	err = drm_mm_reserve_node(mm, node);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		pr_err("impossible reserve succeeded, node %llu + %llu\n",
		       node->start, node->size);
		drm_mm_remove_node(node);
	} else {
		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
		       err, -ENOSPC, node->start, node->size);
	}
	return false;
}
323
/*
 * Attempt a battery of degenerate reservations around the edges of a
 * [0, count * size) mm — zero sizes, negative (wrapping) offsets and
 * ranges past the end — all of which must fail.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		/* Negated values wrap to huge u64 offsets/sizes. */
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
368
/*
 * Fill [0, count * size) with @count nodes via drm_mm_reserve_node() in
 * random order, then verify: duplicate/overlapping reservations fail,
 * remove + re-reserve round-trips work, and the tree stays one continuous
 * run throughout.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(sizeof(*nodes) * count);
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node again must be rejected. */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		/* Advance the window so each prime hits different nodes. */
		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
496
497static int igt_reserve(void *ignored)
498{
499 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
500 int n, ret;
501
502 for_each_prime_number_from(n, 1, 54) {
503 u64 size = BIT_ULL(n);
504
505 ret = __igt_reserve(count, size - 1);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size);
510 if (ret)
511 return ret;
512
513 ret = __igt_reserve(count, size + 1);
514 if (ret)
515 return ret;
516 }
517
518 return 0;
519}
520
/*
 * Insert @node with the given size/alignment/color using @mode, then
 * verify the placement with assert_node(). On any failure the insertion
 * is logged (and unwound) and false is returned.
 */
static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
			  u64 size, u64 alignment, unsigned long color,
			  const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_generic(mm, node,
					 size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
		       size, alignment, color, mode->name, err);
		return false;
	}

	if (!assert_node(node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
544
/*
 * An insertion of @size must fail with -ENOSPC; an unexpected success is
 * unwound and any other error is logged. Returns true only for the
 * expected failure.
 */
static bool expect_insert_fail(struct drm_mm *mm, u64 size)
{
	struct drm_mm_node tmp = {};
	int err;

	err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		pr_err("impossible insert succeeded, node %llu + %llu\n",
		       tmp.start, tmp.size);
		drm_mm_remove_node(&tmp);
	} else {
		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
		       err, -ENOSPC, size);
	}
	return false;
}
564
/*
 * For every insertion mode, completely fill [0, count * size) with @count
 * nodes of @size (with @replace, each insertion goes via a temporary node
 * that is then drm_mm_replace_node()d into place), then check further
 * inserts fail and remove/reinsert cycles keep the range continuous.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				/* The old node must be released by the swap... */
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				/* ...and the new one must take over its placement. */
				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			/* Advance the window so each prime hits different nodes. */
			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Empty the mm so the next mode starts from scratch. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
696
697static int igt_insert(void *ignored)
698{
699 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
700 unsigned int n;
701 int ret;
702
703 for_each_prime_number_from(n, 1, 54) {
704 u64 size = BIT_ULL(n);
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000711 if (ret)
712 return ret;
713
Chris Wilson2bd966d2016-12-22 08:36:14 +0000714 ret = __igt_insert(count, size + 1, false);
715 }
716
717 return 0;
718}
719
720static int igt_replace(void *ignored)
721{
722 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
723 unsigned int n;
724 int ret;
725
726 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
727 * then replacing it with the intended node. We want to check that
728 * the tree is intact and all the information we need is carried
729 * across to the target node.
730 */
731
732 for_each_prime_number_from(n, 1, 54) {
733 u64 size = BIT_ULL(n);
734
735 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000736 if (ret)
737 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000738
739 ret = __igt_insert(count, size, true);
740 if (ret)
741 return ret;
742
743 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000744 }
745
746 return 0;
747}
748
Chris Wilson2fba0de2016-12-22 08:36:15 +0000749static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
750 u64 size, u64 alignment, unsigned long color,
751 u64 range_start, u64 range_end,
752 const struct insert_mode *mode)
753{
754 int err;
755
756 err = drm_mm_insert_node_in_range_generic(mm, node,
757 size, alignment, color,
758 range_start, range_end,
759 mode->search_flags,
760 mode->create_flags);
761 if (err) {
762 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
763 size, alignment, color, mode->name,
764 range_start, range_end, err);
765 return false;
766 }
767
768 if (!assert_node(node, mm, size, alignment, color)) {
769 drm_mm_remove_node(node);
770 return false;
771 }
772
773 return true;
774}
775
/*
 * A range-restricted insertion of @size into [range_start, range_end)
 * must fail with -ENOSPC; an unexpected success is unwound and any other
 * error is logged. Returns true only for the expected failure.
 */
static bool expect_insert_in_range_fail(struct drm_mm *mm,
					u64 size,
					u64 range_start,
					u64 range_end)
{
	struct drm_mm_node tmp = {};
	int err;

	err = drm_mm_insert_node_in_range_generic(mm, &tmp,
						  size, 0, 0,
						  range_start, range_end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
		       tmp.start, tmp.size, range_start, range_end);
		drm_mm_remove_node(&tmp);
	} else {
		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
		       err, -ENOSPC, size, range_start, range_end);
	}

	return false;
}
803
/*
 * Verify that [start, end) is completely packed with size-aligned nodes
 * of @size: no further insert fits, nodes appear in slot order with no
 * interior trailing holes, and range lookups find nothing before @start
 * or after @end.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* A full range must not accept another node. */
	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first size-aligned slot inside [start, end). */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A hole is only acceptable after the last node in range. */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
862
/*
 * Like __igt_insert(), but restrict insertions to the sub-range
 * [start, end) of a [0, count * size) mm, for every insertion mode,
 * and verify the sub-range fills completely and refills in place.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last whole size-aligned slots inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Empty the mm so the next mode starts from scratch. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
948
949static int insert_outside_range(void)
950{
951 struct drm_mm mm;
952 const unsigned int start = 1024;
953 const unsigned int end = 2048;
954 const unsigned int size = end - start;
955
956 drm_mm_init(&mm, start, size);
957
958 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
959 return -EINVAL;
960
961 if (!expect_insert_in_range_fail(&mm, size,
962 start - size/2, start + (size+1)/2))
963 return -EINVAL;
964
965 if (!expect_insert_in_range_fail(&mm, size,
966 end - (size+1)/2, end + size/2))
967 return -EINVAL;
968
969 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
970 return -EINVAL;
971
972 drm_mm_takedown(&mm);
973 return 0;
974}
975
/*
 * Exercise __igt_insert_range() over a variety of sub-ranges of the mm —
 * full, offset, truncated, halves and an interior window — for node sizes
 * 2^n with prime n.
 */
static int igt_insert_range(void *ignored)
{
	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
	unsigned int n;
	int ret;

	/* Check that requests outside the bounds of drm_mm are rejected. */
	ret = insert_outside_range();
	if (ret)
		return ret;

	for_each_prime_number_from(n, 1, 50) {
		const u64 size = BIT_ULL(n);
		const u64 max = count * size;

		ret = __igt_insert_range(count, size, 0, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 1, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max - 1);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max/2);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/2, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
		if (ret)
			return ret;
	}

	return 0;
}
1018
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(max_count * sizeof(*nodes));
	if (!nodes)
		goto err;

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		for_each_prime_number_from(prime, 1, max_count) {
			/* Size is the next prime above the (prime) alignment. */
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Empty the mm so the next mode starts from scratch. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1070
/*
 * Insert nodes with power-of-two alignments 2^(max-1) down to 2^1 into a
 * near-full u64 range, checking alignment is honoured at every scale.
 */
static int igt_align_pot(int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;
	int ret = -EINVAL;

	/* Check that we can align to the full u64 address space */

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		align = BIT_ULL(bit);
		size = BIT_ULL(bit-1) + 1;	/* just over half the alignment */
		if (!expect_insert(&mm, node,
				   size, align, bit,
				   &insert_modes[0])) {
			pr_err("insert failed with alignment=%llx [%d]",
			       align, bit);
			goto out;
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1111
/* Power-of-two alignment checks up to 2^31. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1116
/* Power-of-two alignment checks up to 2^63. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1121
/* Log the parameters and hit window of an eviction scan, for debugging. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1128
/*
 * Log up to @count holes of @mm, each alongside its neighbouring
 * allocated nodes, to aid debugging failed eviction scans.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL here (unallocated
		 * neighbour or kasprintf failure) — presumably relying on
		 * printk rendering a NULL %s safely; confirm.
		 */
		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1160
/* A drm_mm_node plus a list link so eviction candidates can be tracked. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1165
/*
 * Feed @nodes (in @order, if given) into a prepared eviction @scan until it
 * reports a fit, drop the candidates the scan rejects, and remove the
 * selected victims from the mm, collecting them on @evict_list. With
 * @use_color, additionally evict the neighbours the scan reports for color
 * adjacency; without it, no such overlap may be reported.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Unwind the scan; keep only the blocks it wants evicted. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1212
/*
 * Run a full scan over every node and then abort it (remove every block
 * again) without evicting anything, checking that the aborted scan leaves
 * the drm_mm exactly as it was: all nodes still allocated, still linked
 * into the mm, and the range still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	/* Add every node to the scan... */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* ...then back every block out again, i.e. abort the scan. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the link so it can be used as a "seen" marker below. */
		e->link.next = NULL;
	}

	/* Mark every node still reachable by walking the drm_mm. */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* Any node without the marker fell out of the mm's node list. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1259
/*
 * Scan with a request as large as the whole drm_mm so that every node must
 * be marked for eviction, evict them all, verify a single hole spanning the
 * full range remains, then reinsert every node in its original place.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	err = 0;
	/* Every single block must be flagged for eviction by the scanner. */
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			/* Only report the first offender to avoid log spam. */
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	/* With everything evicted, one hole should cover [0, total_size). */
	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Restore the original layout for subsequent subtests. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1308
/*
 * Evict enough nodes (fed to the scanner in @order) to make room for a new
 * allocation of @size/@alignment within [range_start, range_end), insert
 * into the freshly created hole, verify the insertion exactly fills the
 * hole and lies inside the range, then restore the original layout.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The scan promised us a hole; this insertion must now succeed. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The eviction hole should be completely consumed (no trailing hole). */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so the mm is full again for the caller. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1380
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	/* Fill the whole drm_mm with unit-sized nodes, leaving no holes. */
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, unit alignment. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size (half the mm), power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with correspondingly shrinking sizes. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1489
Chris Wilson0e483252016-12-22 08:36:18 +00001490static int igt_evict_range(void *ignored)
1491{
1492 DRM_RND_STATE(prng, random_seed);
1493 const unsigned int size = 8192;
1494 const unsigned int range_size = size / 2;
1495 const unsigned int range_start = size / 4;
1496 const unsigned int range_end = range_start + range_size;
1497 const struct insert_mode *mode;
1498 struct drm_mm mm;
1499 struct evict_node *nodes;
1500 struct drm_mm_node *node, *next;
1501 unsigned int *order, n;
1502 int ret, err;
1503
1504 /* Like igt_evict() but now we are limiting the search to a
1505 * small portion of the full drm_mm.
1506 */
1507
1508 ret = -ENOMEM;
1509 nodes = vzalloc(size * sizeof(*nodes));
1510 if (!nodes)
1511 goto err;
1512
1513 order = drm_random_order(size, &prng);
1514 if (!order)
1515 goto err_nodes;
1516
1517 ret = -EINVAL;
1518 drm_mm_init(&mm, 0, size);
1519 for (n = 0; n < size; n++) {
1520 err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
1521 DRM_MM_SEARCH_DEFAULT);
1522 if (err) {
1523 pr_err("insert failed, step %d\n", n);
1524 ret = err;
1525 goto out;
1526 }
1527 }
1528
1529 for (mode = evict_modes; mode->name; mode++) {
1530 for (n = 1; n <= range_size; n <<= 1) {
1531 drm_random_reorder(order, size, &prng);
1532 err = evict_something(&mm, range_start, range_end,
1533 nodes, order, size,
1534 n, 1,
1535 mode);
1536 if (err) {
1537 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1538 mode->name, n, range_start, range_end);
1539 goto out;
1540 }
1541 }
1542
1543 for (n = 1; n <= range_size; n <<= 1) {
1544 drm_random_reorder(order, size, &prng);
1545 err = evict_something(&mm, range_start, range_end,
1546 nodes, order, size,
1547 range_size/2, n,
1548 mode);
1549 if (err) {
1550 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1551 mode->name, range_size/2, n, range_start, range_end);
1552 goto out;
1553 }
1554 }
1555
1556 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1557 unsigned int nsize = (range_size - n + 1) / 2;
1558
1559 DRM_MM_BUG_ON(!nsize);
1560
1561 drm_random_reorder(order, size, &prng);
1562 err = evict_something(&mm, range_start, range_end,
1563 nodes, order, size,
1564 nsize, n,
1565 mode);
1566 if (err) {
1567 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1568 mode->name, nsize, n, range_start, range_end);
1569 goto out;
1570 }
1571 }
1572 }
1573
1574 ret = 0;
1575out:
1576 drm_mm_for_each_node_safe(node, next, &mm)
1577 drm_mm_remove_node(node);
1578 drm_mm_takedown(&mm);
1579 kfree(order);
1580err_nodes:
1581 vfree(nodes);
1582err:
1583 return ret;
1584}
1585
Chris Wilson05ab3c22016-12-22 08:36:19 +00001586static unsigned int node_index(const struct drm_mm_node *node)
1587{
1588 return div64_u64(node->start, node->size);
1589}
1590
1591static int igt_topdown(void *ignored)
1592{
1593 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1594 DRM_RND_STATE(prng, random_seed);
1595 const unsigned int count = 8192;
1596 unsigned int size;
1597 unsigned long *bitmap = NULL;
1598 struct drm_mm mm;
1599 struct drm_mm_node *nodes, *node, *next;
1600 unsigned int *order, n, m, o = 0;
1601 int ret;
1602
1603 /* When allocating top-down, we expect to be returned a node
1604 * from a suitable hole at the top of the drm_mm. We check that
1605 * the returned node does match the highest available slot.
1606 */
1607
1608 ret = -ENOMEM;
1609 nodes = vzalloc(count * sizeof(*nodes));
1610 if (!nodes)
1611 goto err;
1612
1613 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1614 GFP_TEMPORARY);
1615 if (!bitmap)
1616 goto err_nodes;
1617
1618 order = drm_random_order(count, &prng);
1619 if (!order)
1620 goto err_bitmap;
1621
1622 ret = -EINVAL;
1623 for (size = 1; size <= 64; size <<= 1) {
1624 drm_mm_init(&mm, 0, size*count);
1625 for (n = 0; n < count; n++) {
1626 if (!expect_insert(&mm, &nodes[n],
1627 size, 0, n,
1628 topdown)) {
1629 pr_err("insert failed, size %u step %d\n", size, n);
1630 goto out;
1631 }
1632
Chris Wilson3f85fb32016-12-22 08:36:37 +00001633 if (drm_mm_hole_follows(&nodes[n])) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001634 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1635 n, nodes[n].start, size);
1636 goto out;
1637 }
1638
1639 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1640 goto out;
1641 }
1642
1643 if (!assert_continuous(&mm, size))
1644 goto out;
1645
1646 drm_random_reorder(order, count, &prng);
1647 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1648 for (m = 0; m < n; m++) {
1649 node = &nodes[order[(o + m) % count]];
1650 drm_mm_remove_node(node);
1651 __set_bit(node_index(node), bitmap);
1652 }
1653
1654 for (m = 0; m < n; m++) {
1655 unsigned int last;
1656
1657 node = &nodes[order[(o + m) % count]];
1658 if (!expect_insert(&mm, node,
1659 size, 0, 0,
1660 topdown)) {
1661 pr_err("insert failed, step %d/%d\n", m, n);
1662 goto out;
1663 }
1664
Chris Wilson3f85fb32016-12-22 08:36:37 +00001665 if (drm_mm_hole_follows(node)) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001666 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1667 m, n, node->start);
1668 goto out;
1669 }
1670
1671 last = find_last_bit(bitmap, count);
1672 if (node_index(node) != last) {
1673 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1674 m, n, size, last, node_index(node));
1675 goto out;
1676 }
1677
1678 __clear_bit(last, bitmap);
1679 }
1680
1681 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1682
1683 o += n;
1684 }
1685
1686 drm_mm_for_each_node_safe(node, next, &mm)
1687 drm_mm_remove_node(node);
1688 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1689 }
1690
1691 ret = 0;
1692out:
1693 drm_mm_for_each_node_safe(node, next, &mm)
1694 drm_mm_remove_node(node);
1695 drm_mm_takedown(&mm);
1696 kfree(order);
1697err_bitmap:
1698 kfree(bitmap);
1699err_nodes:
1700 vfree(nodes);
1701err:
1702 return ret;
1703}
1704
Chris Wilson4c2ba552016-12-22 08:36:20 +00001705static void separate_adjacent_colors(const struct drm_mm_node *node,
1706 unsigned long color,
1707 u64 *start,
1708 u64 *end)
1709{
1710 if (node->allocated && node->color != color)
1711 ++*start;
1712
1713 node = list_next_entry(node, node_list);
1714 if (node->allocated && node->color != color)
1715 --*end;
1716}
1717
1718static bool colors_abutt(const struct drm_mm_node *node)
1719{
Chris Wilson3f85fb32016-12-22 08:36:37 +00001720 if (!drm_mm_hole_follows(node) &&
Chris Wilson4c2ba552016-12-22 08:36:20 +00001721 list_next_entry(node, node_list)->allocated) {
1722 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1723 node->color, node->start, node->size,
1724 list_next_entry(node, node_list)->color,
1725 list_next_entry(node, node_list)->start,
1726 list_next_entry(node, node_list)->size);
1727 return true;
1728 }
1729
1730 return false;
1731}
1732
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no color_adjust yet; just check the color is stored. */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		/* Insert with size == color == n so we can cross-check later. */
		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/* Reserving flush against the previous node must be
			 * rejected by the color_adjust overlap check.
			 */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Skip past the gap and align to the node's own size;
			 * this placement must be accepted.
			 */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Fill the remaining space with aligned, colored insertions. */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			/* Differently-colored neighbours must not touch. */
			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1887
/*
 * Color-aware counterpart of evict_something(): evict enough nodes to make
 * room for a new node of the given size/alignment/@color within
 * [range_start, range_end), insert it, verify the color_adjust gaps are
 * honoured, then restore the original layout.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->create_flags);
	/* use_color=true: also evict neighbours flagged by color adjustment. */
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	/* The scan promised us a hole; this insertion must now succeed. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* Differently-colored neighbours must still be separated by a gap. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so the mm is full again for the caller. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1956
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* 2*total_size - 1: room for total_size nodes plus the forced gaps. */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		/* Every node gets a distinct color to force gaps everywhere. */
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, unit alignment. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed size (half the mm), power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with correspondingly shrinking sizes. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2054
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002055static int igt_color_evict_range(void *ignored)
2056{
2057 DRM_RND_STATE(prng, random_seed);
2058 const unsigned int total_size = 8192;
2059 const unsigned int range_size = total_size / 2;
2060 const unsigned int range_start = total_size / 4;
2061 const unsigned int range_end = range_start + range_size;
2062 const struct insert_mode *mode;
2063 unsigned long color = 0;
2064 struct drm_mm mm;
2065 struct evict_node *nodes;
2066 struct drm_mm_node *node, *next;
2067 unsigned int *order, n;
2068 int ret, err;
2069
2070 /* Like igt_color_evict(), but limited to small portion of the full
2071 * drm_mm range.
2072 */
2073
2074 ret = -ENOMEM;
2075 nodes = vzalloc(total_size * sizeof(*nodes));
2076 if (!nodes)
2077 goto err;
2078
2079 order = drm_random_order(total_size, &prng);
2080 if (!order)
2081 goto err_nodes;
2082
2083 ret = -EINVAL;
2084 drm_mm_init(&mm, 0, 2*total_size - 1);
2085 mm.color_adjust = separate_adjacent_colors;
2086 for (n = 0; n < total_size; n++) {
2087 if (!expect_insert(&mm, &nodes[n].node,
2088 1, 0, color++,
2089 &insert_modes[0])) {
2090 pr_err("insert failed, step %d\n", n);
2091 goto out;
2092 }
2093 }
2094
2095 for (mode = evict_modes; mode->name; mode++) {
2096 for (n = 1; n <= range_size; n <<= 1) {
2097 drm_random_reorder(order, range_size, &prng);
2098 err = evict_color(&mm, range_start, range_end,
2099 nodes, order, total_size,
2100 n, 1, color++,
2101 mode);
2102 if (err) {
2103 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2104 mode->name, n, range_start, range_end);
2105 goto out;
2106 }
2107 }
2108
2109 for (n = 1; n < range_size; n <<= 1) {
2110 drm_random_reorder(order, total_size, &prng);
2111 err = evict_color(&mm, range_start, range_end,
2112 nodes, order, total_size,
2113 range_size/2, n, color++,
2114 mode);
2115 if (err) {
2116 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2117 mode->name, total_size/2, n, range_start, range_end);
2118 goto out;
2119 }
2120 }
2121
2122 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2123 unsigned int nsize = (range_size - n + 1) / 2;
2124
2125 DRM_MM_BUG_ON(!nsize);
2126
2127 drm_random_reorder(order, total_size, &prng);
2128 err = evict_color(&mm, range_start, range_end,
2129 nodes, order, total_size,
2130 nsize, n, color++,
2131 mode);
2132 if (err) {
2133 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2134 mode->name, nsize, n, range_start, range_end);
2135 goto out;
2136 }
2137 }
2138 }
2139
2140 ret = 0;
2141out:
2142 if (ret)
Daniel Vetterb5c37142016-12-29 12:09:24 +01002143 show_mm(&mm);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002144 drm_mm_for_each_node_safe(node, next, &mm)
2145 drm_mm_remove_node(node);
2146 drm_mm_takedown(&mm);
2147 kfree(order);
2148err_nodes:
2149 vfree(nodes);
2150err:
2151 return ret;
2152}
2153
Chris Wilson50f00332016-12-22 08:36:09 +00002154#include "drm_selftest.c"
2155
2156static int __init test_drm_mm_init(void)
2157{
2158 int err;
2159
2160 while (!random_seed)
2161 random_seed = get_random_int();
2162
2163 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2164 random_seed, max_iterations, max_prime);
2165 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2166
2167 return err > 0 ? 0 : err;
2168}
2169
/* Nothing to tear down: each selftest cleans up its own drm_mm. */
static void __exit test_drm_mm_exit(void)
{
}
2173
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* 0400: parameters are readable in sysfs but only settable at load time. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");