blob: 2ce92f4dcfc7d6c7c39cca80bef8e53279d39a5e [file] [log] [blame]
Chris Wilson50f00332016-12-22 08:36:09 +00001/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
/* Module-tunable knobs for the randomised selftests below. */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;

/* Symbolic indices into insert_modes[] for the named strategies. */
enum {
	DEFAULT,
	TOPDOWN,
	BEST,
};

/*
 * Search/create flag pairings exercised by the insertion tests; the
 * eviction tests use the reduced evict_modes[] table. Both tables are
 * terminated by a zeroed sentinel entry (name == NULL).
 */
static const struct insert_mode {
	const char *name;
	unsigned int search_flags;
	unsigned int create_flags;
} insert_modes[] = {
	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
	{}
}, evict_modes[] = {
	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	{}
};
44
/* Smoke test: proves the selftest harness itself runs and reports. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
50
Chris Wilson393b50f2016-12-22 08:36:10 +000051static bool assert_no_holes(const struct drm_mm *mm)
52{
53 struct drm_mm_node *hole;
54 u64 hole_start, hole_end;
55 unsigned long count;
56
57 count = 0;
58 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
59 count++;
60 if (count) {
61 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
62 return false;
63 }
64
65 drm_mm_for_each_node(hole, mm) {
Chris Wilson3f85fb32016-12-22 08:36:37 +000066 if (drm_mm_hole_follows(hole)) {
Chris Wilson393b50f2016-12-22 08:36:10 +000067 pr_err("Hole follows node, expected none!\n");
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
76{
77 struct drm_mm_node *hole;
78 u64 hole_start, hole_end;
79 unsigned long count;
80 bool ok = true;
81
82 if (end <= start)
83 return true;
84
85 count = 0;
86 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
87 if (start != hole_start || end != hole_end) {
88 if (ok)
89 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
90 hole_start, hole_end,
91 start, end);
92 ok = false;
93 }
94 count++;
95 }
96 if (count != 1) {
97 pr_err("Expected to find one hole, found %lu instead\n", count);
98 ok = false;
99 }
100
101 return ok;
102}
103
/*
 * Verify that @mm is completely filled, starting at address 0, by
 * back-to-back nodes of @size bytes each, and that a range lookup over
 * each node's slot finds exactly that node.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		/* Nodes must appear in address order with no gaps */
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range iterator over this slot must yield only @node */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/*
 * Basic initialisation checks: a zeroed struct does not claim to be
 * initialized, drm_mm_init() yields a clean mm whose whole range is one
 * hole, and reserving then removing the full range round-trips.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first to prove drm_mm_init() sets up all state itself */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_takedown(&mm);
	return ret;
}
257
/*
 * Create a small drm_mm with a couple of nodes and a few holes, and
 * check that the debug iterator doesn't explode over a trivial drm_mm.
 */
static int igt_debug(void *ignored)
{
	struct drm_mm mm;
	struct drm_mm_node nodes[2];
	int ret;

	drm_mm_init(&mm, 0, 4096);

	/* First node leaves a hole [0, 512) before it */
	memset(nodes, 0, sizeof(nodes));
	nodes[0].start = 512;
	nodes[0].size = 1024;
	ret = drm_mm_reserve_node(&mm, &nodes[0]);
	if (ret) {
		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
		       nodes[0].start, nodes[0].size);
		return ret;
	}

	/* Second node near the top: holes remain between and after */
	nodes[1].size = 1024;
	nodes[1].start = 4096 - 512 - nodes[1].size;
	ret = drm_mm_reserve_node(&mm, &nodes[1]);
	if (ret) {
		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
		       nodes[1].start, nodes[1].size);
		return ret;
	}

	/* NOTE(review): the nodes are never removed and drm_mm_takedown()
	 * is not called before returning — presumably harmless for
	 * on-stack selftest state, but worth confirming. */
	drm_mm_debug_table(&mm, __func__);
	return 0;
}
292
Chris Wilson900537d2016-12-22 08:36:12 +0000293static struct drm_mm_node *set_node(struct drm_mm_node *node,
294 u64 start, u64 size)
295{
296 node->start = start;
297 node->size = size;
298 return node;
299}
300
301static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
302{
303 int err;
304
305 err = drm_mm_reserve_node(mm, node);
306 if (likely(err == -ENOSPC))
307 return true;
308
309 if (!err) {
310 pr_err("impossible reserve succeeded, node %llu + %llu\n",
311 node->start, node->size);
312 drm_mm_remove_node(node);
313 } else {
314 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
315 err, -ENOSPC, node->start, node->size);
316 }
317 return false;
318}
319
/*
 * Feed drm_mm_reserve_node() a table of degenerate and out-of-range
 * [start, size] pairs — relative to an mm covering [0, count * size) —
 * and check that every single one is rejected.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
/* B() stringifies the pair so failures identify the offending entry */
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
364
/*
 * Exercise drm_mm_reserve_node() over an mm of @count slots of @size
 * bytes: out-of-range and overlapping reservations must fail, random
 * insertion must leave a continuous layout, and remove/reinsert cycles
 * (single and in prime-sized batches) must restore it.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(sizeof(*nodes) * count);
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Reserve every slot in a random order */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			/* NOTE(review): looks like this should report
			 * nodes[order[n]].start, the node just reserved —
			 * confirm before relying on this message. */
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		/* Advance the window so each prime batch hits fresh slots */
		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
492
493static int igt_reserve(void *ignored)
494{
495 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
496 int n, ret;
497
498 for_each_prime_number_from(n, 1, 54) {
499 u64 size = BIT_ULL(n);
500
501 ret = __igt_reserve(count, size - 1);
502 if (ret)
503 return ret;
504
505 ret = __igt_reserve(count, size);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size + 1);
510 if (ret)
511 return ret;
512 }
513
514 return 0;
515}
516
Chris Wilson78866922016-12-22 08:36:13 +0000517static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
518 u64 size, u64 alignment, unsigned long color,
519 const struct insert_mode *mode)
520{
521 int err;
522
523 err = drm_mm_insert_node_generic(mm, node,
524 size, alignment, color,
525 mode->search_flags,
526 mode->create_flags);
527 if (err) {
528 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
529 size, alignment, color, mode->name, err);
530 return false;
531 }
532
533 if (!assert_node(node, mm, size, alignment, color)) {
534 drm_mm_remove_node(node);
535 return false;
536 }
537
538 return true;
539}
540
541static bool expect_insert_fail(struct drm_mm *mm, u64 size)
542{
543 struct drm_mm_node tmp = {};
544 int err;
545
546 err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
547 if (likely(err == -ENOSPC))
548 return true;
549
550 if (!err) {
551 pr_err("impossible insert succeeded, node %llu + %llu\n",
552 tmp.start, tmp.size);
553 drm_mm_remove_node(&tmp);
554 } else {
555 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
556 err, -ENOSPC, size);
557 }
558 return false;
559}
560
/*
 * Fill the whole mm with @count nodes of @size bytes, in random order,
 * once per insertion mode; then verify ordering, single and batched
 * remove/reinsert, and — when @replace is set — that
 * drm_mm_replace_node() hands a freshly inserted dummy's placement over
 * to the target node intact.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert via an on-stack dummy */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
692
693static int igt_insert(void *ignored)
694{
695 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
696 unsigned int n;
697 int ret;
698
699 for_each_prime_number_from(n, 1, 54) {
700 u64 size = BIT_ULL(n);
701
Chris Wilson2bd966d2016-12-22 08:36:14 +0000702 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000703 if (ret)
704 return ret;
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size + 1, false);
711 }
712
713 return 0;
714}
715
716static int igt_replace(void *ignored)
717{
718 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
719 unsigned int n;
720 int ret;
721
722 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
723 * then replacing it with the intended node. We want to check that
724 * the tree is intact and all the information we need is carried
725 * across to the target node.
726 */
727
728 for_each_prime_number_from(n, 1, 54) {
729 u64 size = BIT_ULL(n);
730
731 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000732 if (ret)
733 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000734
735 ret = __igt_insert(count, size, true);
736 if (ret)
737 return ret;
738
739 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000740 }
741
742 return 0;
743}
744
Chris Wilson2fba0de2016-12-22 08:36:15 +0000745static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
746 u64 size, u64 alignment, unsigned long color,
747 u64 range_start, u64 range_end,
748 const struct insert_mode *mode)
749{
750 int err;
751
752 err = drm_mm_insert_node_in_range_generic(mm, node,
753 size, alignment, color,
754 range_start, range_end,
755 mode->search_flags,
756 mode->create_flags);
757 if (err) {
758 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
759 size, alignment, color, mode->name,
760 range_start, range_end, err);
761 return false;
762 }
763
764 if (!assert_node(node, mm, size, alignment, color)) {
765 drm_mm_remove_node(node);
766 return false;
767 }
768
769 return true;
770}
771
/*
 * Attempt an insertion into [range_start, range_end) that must not fit;
 * only -ENOSPC counts as success. A spuriously successful insert is
 * undone before reporting failure.
 */
static bool expect_insert_in_range_fail(struct drm_mm *mm,
					u64 size,
					u64 range_start,
					u64 range_end)
{
	struct drm_mm_node tmp = {};
	int err;

	err = drm_mm_insert_node_in_range_generic(mm, &tmp,
						  size, 0, 0,
						  range_start, range_end,
						  DRM_MM_SEARCH_DEFAULT,
						  DRM_MM_CREATE_DEFAULT);
	if (likely(err == -ENOSPC))
		return true;

	if (!err) {
		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
		       tmp.start, tmp.size, range_start, range_end);
		drm_mm_remove_node(&tmp);
	} else {
		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
		       err, -ENOSPC, size, range_start, range_end);
	}

	return false;
}
799
/*
 * After filling [start, end) with nodes of @size, check the range is
 * exactly full: no further insertion fits, every node sits at its
 * expected slot inside the range, and no node exists outside it.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first slot lying entirely inside the range */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* Holes are only permitted beyond the end of the range */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
858
/*
 * Like __igt_insert(), but restrict insertions to the sub-range
 * [start, end) of a larger mm, then verify the sub-range ends up
 * exactly full (and refillable) for every insertion mode.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last slot indices fitting entirely inside [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
944
945static int insert_outside_range(void)
946{
947 struct drm_mm mm;
948 const unsigned int start = 1024;
949 const unsigned int end = 2048;
950 const unsigned int size = end - start;
951
952 drm_mm_init(&mm, start, size);
953
954 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
955 return -EINVAL;
956
957 if (!expect_insert_in_range_fail(&mm, size,
958 start - size/2, start + (size+1)/2))
959 return -EINVAL;
960
961 if (!expect_insert_in_range_fail(&mm, size,
962 end - (size+1)/2, end + size/2))
963 return -EINVAL;
964
965 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
966 return -EINVAL;
967
968 drm_mm_takedown(&mm);
969 return 0;
970}
971
/*
 * Exercise range-restricted insertion over a selection of sub-ranges
 * (full, offset, truncated, halves, interior) at each size boundary.
 */
static int igt_insert_range(void *ignored)
{
	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
	unsigned int n;
	int ret;

	/* Check that requests outside the bounds of drm_mm are rejected. */
	ret = insert_outside_range();
	if (ret)
		return ret;

	for_each_prime_number_from(n, 1, 50) {
		const u64 size = BIT_ULL(n);
		const u64 max = count * size;

		ret = __igt_insert_range(count, size, 0, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 1, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max - 1);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, 0, max/2);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/2, max);
		if (ret)
			return ret;

		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
		if (ret)
			return ret;
	}

	return 0;
}
1014
Chris Wilson9b26f2e2016-12-22 08:36:16 +00001015static int igt_align(void *ignored)
1016{
1017 const struct insert_mode *mode;
1018 const unsigned int max_count = min(8192u, max_prime);
1019 struct drm_mm mm;
1020 struct drm_mm_node *nodes, *node, *next;
1021 unsigned int prime;
1022 int ret = -EINVAL;
1023
1024 /* For each of the possible insertion modes, we pick a few
1025 * arbitrary alignments and check that the inserted node
1026 * meets our requirements.
1027 */
1028
1029 nodes = vzalloc(max_count * sizeof(*nodes));
1030 if (!nodes)
1031 goto err;
1032
1033 drm_mm_init(&mm, 1, U64_MAX - 2);
1034
1035 for (mode = insert_modes; mode->name; mode++) {
1036 unsigned int i = 0;
1037
1038 for_each_prime_number_from(prime, 1, max_count) {
1039 u64 size = next_prime_number(prime);
1040
1041 if (!expect_insert(&mm, &nodes[i],
1042 size, prime, i,
1043 mode)) {
1044 pr_err("%s insert failed with alignment=%d",
1045 mode->name, prime);
1046 goto out;
1047 }
1048
1049 i++;
1050 }
1051
1052 drm_mm_for_each_node_safe(node, next, &mm)
1053 drm_mm_remove_node(node);
1054 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1055 }
1056
1057 ret = 0;
1058out:
1059 drm_mm_for_each_node_safe(node, next, &mm)
1060 drm_mm_remove_node(node);
1061 drm_mm_takedown(&mm);
1062 vfree(nodes);
1063err:
1064 return ret;
1065}
1066
1067static int igt_align_pot(int max)
1068{
1069 struct drm_mm mm;
1070 struct drm_mm_node *node, *next;
1071 int bit;
1072 int ret = -EINVAL;
1073
1074 /* Check that we can align to the full u64 address space */
1075
1076 drm_mm_init(&mm, 1, U64_MAX - 2);
1077
1078 for (bit = max - 1; bit; bit--) {
1079 u64 align, size;
1080
1081 node = kzalloc(sizeof(*node), GFP_KERNEL);
1082 if (!node) {
1083 ret = -ENOMEM;
1084 goto out;
1085 }
1086
1087 align = BIT_ULL(bit);
1088 size = BIT_ULL(bit-1) + 1;
1089 if (!expect_insert(&mm, node,
1090 size, align, bit,
1091 &insert_modes[0])) {
1092 pr_err("insert failed with alignment=%llx [%d]",
1093 align, bit);
1094 goto out;
1095 }
1096 }
1097
1098 ret = 0;
1099out:
1100 drm_mm_for_each_node_safe(node, next, &mm) {
1101 drm_mm_remove_node(node);
1102 kfree(node);
1103 }
1104 drm_mm_takedown(&mm);
1105 return ret;
1106}
1107
/* Power-of-two alignment checks up to the 32-bit limit. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1112
/* Power-of-two alignment checks up to the full 64-bit limit. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1117
/* Dump the parameters and current hit window of an eviction scan. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1124
/*
 * Dump up to @count holes, each bracketed by its allocated neighbour
 * nodes (when present), as an aid for diagnosing scan failures.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL (unallocated
		 * neighbour or kasprintf failure) — presumably relies on
		 * printk rendering a NULL %s safely; confirm. */
		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1156
/* A drm_mm_node plus a link for threading it onto a local evict list. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1161
/*
 * Feed @nodes (optionally permuted by @order) into the eviction @scan
 * until it reports a fit, trim @evict_list to the nodes the scan kept,
 * then remove those nodes (plus any colour-adjacent evictions when
 * @use_color) from the mm. Returns false if no eviction set was found
 * or colour eviction misbehaved.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	/* Add blocks until the scan signals a suitable hole was found */
	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Keep only the blocks the scan marked for eviction */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Colour adjacency may require evicting extra neighbours */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1208
/* Check that a scan which is fully wound down (every block added and then
 * removed again, with no eviction performed) leaves the drm_mm untouched:
 * all nodes still allocated, still linked, and still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	/* Add every node to a scan and then back every single one out. */
	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	/* Every node must still be allocated after the aborted scan. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the marker reused below for reachability checking. */
		e->link.next = NULL;
	}

	/* Mark each node that is still reachable by walking the drm_mm... */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* ...and verify that none has gone missing from the node list. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1255
/* Run a scan sized to cover the whole drm_mm: every node should be selected
 * as a victim, eviction should leave exactly one hole spanning the entire
 * range, and reinserting the evicted nodes should restore the original
 * continuous layout.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}
	/* As the scan needs the whole range, every block must be a victim. */
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			pr_err("Node %lld not marked for eviction!\n",
			       e->node.start);
			list_del(&e->link);
		}
	}

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	/* Evicting everything must leave one hole covering the full range. */
	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Reinsert at the recorded offsets to restore the original layout. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1298
/* Evict enough nodes (offered in @order) from [@range_start, @range_end) to
 * make room for one allocation of @size/@alignment, insert into the hole,
 * verify the insertion exactly fills the hole inside the range, then undo
 * everything and check the drm_mm is restored.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must now fit our allocation. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The scan is expected to produce an exact-fit hole: no slack after. */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so later iterations start pristine. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1370
/* Selftest entry point: stress eviction over the whole drm_mm. */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the drm_mm completely with unit-sized nodes. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Awkward (prime) alignments with matching odd sizes. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1479
Chris Wilson0e483252016-12-22 08:36:18 +00001480static int igt_evict_range(void *ignored)
1481{
1482 DRM_RND_STATE(prng, random_seed);
1483 const unsigned int size = 8192;
1484 const unsigned int range_size = size / 2;
1485 const unsigned int range_start = size / 4;
1486 const unsigned int range_end = range_start + range_size;
1487 const struct insert_mode *mode;
1488 struct drm_mm mm;
1489 struct evict_node *nodes;
1490 struct drm_mm_node *node, *next;
1491 unsigned int *order, n;
1492 int ret, err;
1493
1494 /* Like igt_evict() but now we are limiting the search to a
1495 * small portion of the full drm_mm.
1496 */
1497
1498 ret = -ENOMEM;
1499 nodes = vzalloc(size * sizeof(*nodes));
1500 if (!nodes)
1501 goto err;
1502
1503 order = drm_random_order(size, &prng);
1504 if (!order)
1505 goto err_nodes;
1506
1507 ret = -EINVAL;
1508 drm_mm_init(&mm, 0, size);
1509 for (n = 0; n < size; n++) {
1510 err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
1511 DRM_MM_SEARCH_DEFAULT);
1512 if (err) {
1513 pr_err("insert failed, step %d\n", n);
1514 ret = err;
1515 goto out;
1516 }
1517 }
1518
1519 for (mode = evict_modes; mode->name; mode++) {
1520 for (n = 1; n <= range_size; n <<= 1) {
1521 drm_random_reorder(order, size, &prng);
1522 err = evict_something(&mm, range_start, range_end,
1523 nodes, order, size,
1524 n, 1,
1525 mode);
1526 if (err) {
1527 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1528 mode->name, n, range_start, range_end);
1529 goto out;
1530 }
1531 }
1532
1533 for (n = 1; n <= range_size; n <<= 1) {
1534 drm_random_reorder(order, size, &prng);
1535 err = evict_something(&mm, range_start, range_end,
1536 nodes, order, size,
1537 range_size/2, n,
1538 mode);
1539 if (err) {
1540 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1541 mode->name, range_size/2, n, range_start, range_end);
1542 goto out;
1543 }
1544 }
1545
1546 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1547 unsigned int nsize = (range_size - n + 1) / 2;
1548
1549 DRM_MM_BUG_ON(!nsize);
1550
1551 drm_random_reorder(order, size, &prng);
1552 err = evict_something(&mm, range_start, range_end,
1553 nodes, order, size,
1554 nsize, n,
1555 mode);
1556 if (err) {
1557 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1558 mode->name, nsize, n, range_start, range_end);
1559 goto out;
1560 }
1561 }
1562 }
1563
1564 ret = 0;
1565out:
1566 drm_mm_for_each_node_safe(node, next, &mm)
1567 drm_mm_remove_node(node);
1568 drm_mm_takedown(&mm);
1569 kfree(order);
1570err_nodes:
1571 vfree(nodes);
1572err:
1573 return ret;
1574}
1575
Chris Wilson05ab3c22016-12-22 08:36:19 +00001576static unsigned int node_index(const struct drm_mm_node *node)
1577{
1578 return div64_u64(node->start, node->size);
1579}
1580
1581static int igt_topdown(void *ignored)
1582{
1583 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1584 DRM_RND_STATE(prng, random_seed);
1585 const unsigned int count = 8192;
1586 unsigned int size;
1587 unsigned long *bitmap = NULL;
1588 struct drm_mm mm;
1589 struct drm_mm_node *nodes, *node, *next;
1590 unsigned int *order, n, m, o = 0;
1591 int ret;
1592
1593 /* When allocating top-down, we expect to be returned a node
1594 * from a suitable hole at the top of the drm_mm. We check that
1595 * the returned node does match the highest available slot.
1596 */
1597
1598 ret = -ENOMEM;
1599 nodes = vzalloc(count * sizeof(*nodes));
1600 if (!nodes)
1601 goto err;
1602
1603 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1604 GFP_TEMPORARY);
1605 if (!bitmap)
1606 goto err_nodes;
1607
1608 order = drm_random_order(count, &prng);
1609 if (!order)
1610 goto err_bitmap;
1611
1612 ret = -EINVAL;
1613 for (size = 1; size <= 64; size <<= 1) {
1614 drm_mm_init(&mm, 0, size*count);
1615 for (n = 0; n < count; n++) {
1616 if (!expect_insert(&mm, &nodes[n],
1617 size, 0, n,
1618 topdown)) {
1619 pr_err("insert failed, size %u step %d\n", size, n);
1620 goto out;
1621 }
1622
Chris Wilson3f85fb32016-12-22 08:36:37 +00001623 if (drm_mm_hole_follows(&nodes[n])) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001624 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1625 n, nodes[n].start, size);
1626 goto out;
1627 }
1628
1629 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1630 goto out;
1631 }
1632
1633 if (!assert_continuous(&mm, size))
1634 goto out;
1635
1636 drm_random_reorder(order, count, &prng);
1637 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1638 for (m = 0; m < n; m++) {
1639 node = &nodes[order[(o + m) % count]];
1640 drm_mm_remove_node(node);
1641 __set_bit(node_index(node), bitmap);
1642 }
1643
1644 for (m = 0; m < n; m++) {
1645 unsigned int last;
1646
1647 node = &nodes[order[(o + m) % count]];
1648 if (!expect_insert(&mm, node,
1649 size, 0, 0,
1650 topdown)) {
1651 pr_err("insert failed, step %d/%d\n", m, n);
1652 goto out;
1653 }
1654
Chris Wilson3f85fb32016-12-22 08:36:37 +00001655 if (drm_mm_hole_follows(node)) {
Chris Wilson05ab3c22016-12-22 08:36:19 +00001656 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1657 m, n, node->start);
1658 goto out;
1659 }
1660
1661 last = find_last_bit(bitmap, count);
1662 if (node_index(node) != last) {
1663 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1664 m, n, size, last, node_index(node));
1665 goto out;
1666 }
1667
1668 __clear_bit(last, bitmap);
1669 }
1670
1671 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1672
1673 o += n;
1674 }
1675
1676 drm_mm_for_each_node_safe(node, next, &mm)
1677 drm_mm_remove_node(node);
1678 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1679 }
1680
1681 ret = 0;
1682out:
1683 drm_mm_for_each_node_safe(node, next, &mm)
1684 drm_mm_remove_node(node);
1685 drm_mm_takedown(&mm);
1686 kfree(order);
1687err_bitmap:
1688 kfree(bitmap);
1689err_nodes:
1690 vfree(nodes);
1691err:
1692 return ret;
1693}
1694
/* color_adjust callback: shrink the hole by one unit on any side whose
 * neighbouring node has a different color, so that two differently colored
 * nodes can never be placed immediately adjacent.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	/* @node precedes the hole: bump the start if the colors differ. */
	if (node->allocated && node->color != color)
		++*start;

	/* The next node follows the hole: pull in the end if colors differ. */
	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		--*end;
}
1707
1708static bool colors_abutt(const struct drm_mm_node *node)
1709{
Chris Wilson3f85fb32016-12-22 08:36:37 +00001710 if (!drm_mm_hole_follows(node) &&
Chris Wilson4c2ba552016-12-22 08:36:20 +00001711 list_next_entry(node, node_list)->allocated) {
1712 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1713 node->color, node->start, node->size,
1714 list_next_entry(node, node_list)->color,
1715 list_next_entry(node, node_list)->start,
1716 list_next_entry(node, node_list)->size);
1717 return true;
1718 }
1719
1720 return false;
1721}
1722
1723static int igt_color(void *ignored)
1724{
1725 const unsigned int count = min(4096u, max_iterations);
1726 const struct insert_mode *mode;
1727 struct drm_mm mm;
1728 struct drm_mm_node *node, *nn;
1729 unsigned int n;
1730 int ret = -EINVAL, err;
1731
1732 /* Color adjustment complicates everything. First we just check
1733 * that when we insert a node we apply any color_adjustment callback.
1734 * The callback we use should ensure that there is a gap between
1735 * any two nodes, and so after each insertion we check that those
1736 * holes are inserted and that they are preserved.
1737 */
1738
1739 drm_mm_init(&mm, 0, U64_MAX);
1740
1741 for (n = 1; n <= count; n++) {
1742 node = kzalloc(sizeof(*node), GFP_KERNEL);
1743 if (!node) {
1744 ret = -ENOMEM;
1745 goto out;
1746 }
1747
1748 if (!expect_insert(&mm, node,
1749 n, 0, n,
1750 &insert_modes[0])) {
1751 pr_err("insert failed, step %d\n", n);
1752 kfree(node);
1753 goto out;
1754 }
1755 }
1756
1757 drm_mm_for_each_node_safe(node, nn, &mm) {
1758 if (node->color != node->size) {
1759 pr_err("invalid color stored: expected %lld, found %ld\n",
1760 node->size, node->color);
1761
1762 goto out;
1763 }
1764
1765 drm_mm_remove_node(node);
1766 kfree(node);
1767 }
1768
1769 /* Now, let's start experimenting with applying a color callback */
1770 mm.color_adjust = separate_adjacent_colors;
1771 for (mode = insert_modes; mode->name; mode++) {
1772 u64 last;
1773
1774 node = kzalloc(sizeof(*node), GFP_KERNEL);
1775 if (!node) {
1776 ret = -ENOMEM;
1777 goto out;
1778 }
1779
1780 node->size = 1 + 2*count;
1781 node->color = node->size;
1782
1783 err = drm_mm_reserve_node(&mm, node);
1784 if (err) {
1785 pr_err("initial reserve failed!\n");
1786 ret = err;
1787 goto out;
1788 }
1789
1790 last = node->start + node->size;
1791
1792 for (n = 1; n <= count; n++) {
1793 int rem;
1794
1795 node = kzalloc(sizeof(*node), GFP_KERNEL);
1796 if (!node) {
1797 ret = -ENOMEM;
1798 goto out;
1799 }
1800
1801 node->start = last;
1802 node->size = n + count;
1803 node->color = node->size;
1804
1805 err = drm_mm_reserve_node(&mm, node);
1806 if (err != -ENOSPC) {
1807 pr_err("reserve %d did not report color overlap! err=%d\n",
1808 n, err);
1809 goto out;
1810 }
1811
1812 node->start += n + 1;
1813 rem = misalignment(node, n + count);
1814 node->start += n + count - rem;
1815
1816 err = drm_mm_reserve_node(&mm, node);
1817 if (err) {
1818 pr_err("reserve %d failed, err=%d\n", n, err);
1819 ret = err;
1820 goto out;
1821 }
1822
1823 last = node->start + node->size;
1824 }
1825
1826 for (n = 1; n <= count; n++) {
1827 node = kzalloc(sizeof(*node), GFP_KERNEL);
1828 if (!node) {
1829 ret = -ENOMEM;
1830 goto out;
1831 }
1832
1833 if (!expect_insert(&mm, node,
1834 n, n, n,
1835 mode)) {
1836 pr_err("%s insert failed, step %d\n",
1837 mode->name, n);
1838 kfree(node);
1839 goto out;
1840 }
1841 }
1842
1843 drm_mm_for_each_node_safe(node, nn, &mm) {
1844 u64 rem;
1845
1846 if (node->color != node->size) {
1847 pr_err("%s invalid color stored: expected %lld, found %ld\n",
1848 mode->name, node->size, node->color);
1849
1850 goto out;
1851 }
1852
1853 if (colors_abutt(node))
1854 goto out;
1855
1856 div64_u64_rem(node->start, node->size, &rem);
1857 if (rem) {
1858 pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
1859 mode->name, node->start, node->size, rem);
1860 goto out;
1861 }
1862
1863 drm_mm_remove_node(node);
1864 kfree(node);
1865 }
1866 }
1867
1868 ret = 0;
1869out:
1870 drm_mm_for_each_node_safe(node, nn, &mm) {
1871 drm_mm_remove_node(node);
1872 kfree(node);
1873 }
1874 drm_mm_takedown(&mm);
1875 return ret;
1876}
1877
/* As evict_something(), but with a color constraint: evict enough nodes to
 * create a hole for a colored allocation inside [@range_start, @range_end),
 * insert into it, verify fit and color separation, then restore the drm_mm.
 *
 * Returns 0 on success or a negative error code.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->create_flags);
	/* use_color=true: also collect neighbours evicted for color gaps. */
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must now fit our colored allocation. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color adjustment must have kept a gap to any neighbour. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so later iterations start pristine. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1946
1947static int igt_color_evict(void *ignored)
1948{
1949 DRM_RND_STATE(prng, random_seed);
1950 const unsigned int total_size = min(8192u, max_iterations);
1951 const struct insert_mode *mode;
1952 unsigned long color = 0;
1953 struct drm_mm mm;
1954 struct evict_node *nodes;
1955 struct drm_mm_node *node, *next;
1956 unsigned int *order, n;
1957 int ret, err;
1958
1959 /* Check that the drm_mm_scan also honours color adjustment when
1960 * choosing its victims to create a hole. Our color_adjust does not
1961 * allow two nodes to be placed together without an intervening hole
1962 * enlarging the set of victims that must be evicted.
1963 */
1964
1965 ret = -ENOMEM;
1966 nodes = vzalloc(total_size * sizeof(*nodes));
1967 if (!nodes)
1968 goto err;
1969
1970 order = drm_random_order(total_size, &prng);
1971 if (!order)
1972 goto err_nodes;
1973
1974 ret = -EINVAL;
1975 drm_mm_init(&mm, 0, 2*total_size - 1);
1976 mm.color_adjust = separate_adjacent_colors;
1977 for (n = 0; n < total_size; n++) {
1978 if (!expect_insert(&mm, &nodes[n].node,
1979 1, 0, color++,
1980 &insert_modes[0])) {
1981 pr_err("insert failed, step %d\n", n);
1982 goto out;
1983 }
1984 }
1985
1986 for (mode = evict_modes; mode->name; mode++) {
1987 for (n = 1; n <= total_size; n <<= 1) {
1988 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00001989 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00001990 nodes, order, total_size,
1991 n, 1, color++,
1992 mode);
1993 if (err) {
1994 pr_err("%s evict_color(size=%u) failed\n",
1995 mode->name, n);
1996 goto out;
1997 }
1998 }
1999
2000 for (n = 1; n < total_size; n <<= 1) {
2001 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002002 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00002003 nodes, order, total_size,
2004 total_size/2, n, color++,
2005 mode);
2006 if (err) {
2007 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2008 mode->name, total_size/2, n);
2009 goto out;
2010 }
2011 }
2012
2013 for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2014 unsigned int nsize = (total_size - n + 1) / 2;
2015
2016 DRM_MM_BUG_ON(!nsize);
2017
2018 drm_random_reorder(order, total_size, &prng);
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002019 err = evict_color(&mm, 0, U64_MAX,
Chris Wilsonc1b702c2016-12-22 08:36:21 +00002020 nodes, order, total_size,
2021 nsize, n, color++,
2022 mode);
2023 if (err) {
2024 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2025 mode->name, nsize, n);
2026 goto out;
2027 }
2028 }
2029 }
2030
2031 ret = 0;
2032out:
2033 if (ret)
2034 drm_mm_debug_table(&mm, __func__);
2035 drm_mm_for_each_node_safe(node, next, &mm)
2036 drm_mm_remove_node(node);
2037 drm_mm_takedown(&mm);
2038 kfree(order);
2039err_nodes:
2040 vfree(nodes);
2041err:
2042 return ret;
2043}
2044
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002045static int igt_color_evict_range(void *ignored)
2046{
2047 DRM_RND_STATE(prng, random_seed);
2048 const unsigned int total_size = 8192;
2049 const unsigned int range_size = total_size / 2;
2050 const unsigned int range_start = total_size / 4;
2051 const unsigned int range_end = range_start + range_size;
2052 const struct insert_mode *mode;
2053 unsigned long color = 0;
2054 struct drm_mm mm;
2055 struct evict_node *nodes;
2056 struct drm_mm_node *node, *next;
2057 unsigned int *order, n;
2058 int ret, err;
2059
2060 /* Like igt_color_evict(), but limited to small portion of the full
2061 * drm_mm range.
2062 */
2063
2064 ret = -ENOMEM;
2065 nodes = vzalloc(total_size * sizeof(*nodes));
2066 if (!nodes)
2067 goto err;
2068
2069 order = drm_random_order(total_size, &prng);
2070 if (!order)
2071 goto err_nodes;
2072
2073 ret = -EINVAL;
2074 drm_mm_init(&mm, 0, 2*total_size - 1);
2075 mm.color_adjust = separate_adjacent_colors;
2076 for (n = 0; n < total_size; n++) {
2077 if (!expect_insert(&mm, &nodes[n].node,
2078 1, 0, color++,
2079 &insert_modes[0])) {
2080 pr_err("insert failed, step %d\n", n);
2081 goto out;
2082 }
2083 }
2084
2085 for (mode = evict_modes; mode->name; mode++) {
2086 for (n = 1; n <= range_size; n <<= 1) {
2087 drm_random_reorder(order, range_size, &prng);
2088 err = evict_color(&mm, range_start, range_end,
2089 nodes, order, total_size,
2090 n, 1, color++,
2091 mode);
2092 if (err) {
2093 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2094 mode->name, n, range_start, range_end);
2095 goto out;
2096 }
2097 }
2098
2099 for (n = 1; n < range_size; n <<= 1) {
2100 drm_random_reorder(order, total_size, &prng);
2101 err = evict_color(&mm, range_start, range_end,
2102 nodes, order, total_size,
2103 range_size/2, n, color++,
2104 mode);
2105 if (err) {
2106 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2107 mode->name, total_size/2, n, range_start, range_end);
2108 goto out;
2109 }
2110 }
2111
2112 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2113 unsigned int nsize = (range_size - n + 1) / 2;
2114
2115 DRM_MM_BUG_ON(!nsize);
2116
2117 drm_random_reorder(order, total_size, &prng);
2118 err = evict_color(&mm, range_start, range_end,
2119 nodes, order, total_size,
2120 nsize, n, color++,
2121 mode);
2122 if (err) {
2123 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2124 mode->name, nsize, n, range_start, range_end);
2125 goto out;
2126 }
2127 }
2128 }
2129
2130 ret = 0;
2131out:
2132 if (ret)
2133 drm_mm_debug_table(&mm, __func__);
2134 drm_mm_for_each_node_safe(node, next, &mm)
2135 drm_mm_remove_node(node);
2136 drm_mm_takedown(&mm);
2137 kfree(order);
2138err_nodes:
2139 vfree(nodes);
2140err:
2141 return ret;
2142}
2143
Chris Wilson50f00332016-12-22 08:36:09 +00002144#include "drm_selftest.c"
2145
2146static int __init test_drm_mm_init(void)
2147{
2148 int err;
2149
2150 while (!random_seed)
2151 random_seed = get_random_int();
2152
2153 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2154 random_seed, max_iterations, max_prime);
2155 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2156
2157 return err > 0 ? 0 : err;
2158}
2159
/* Module unload: nothing to tear down, each selftest cleans up after itself. */
static void __exit test_drm_mm_exit(void)
{
}
2163
2164module_init(test_drm_mm_init);
2165module_exit(test_drm_mm_exit);
2166
2167module_param(random_seed, uint, 0400);
2168module_param(max_iterations, uint, 0400);
2169module_param(max_prime, uint, 0400);
2170
2171MODULE_AUTHOR("Intel Corporation");
2172MODULE_LICENSE("GPL");