blob: 997f2bc93b9b73bd5396cddf89ca1fc4cf34d025 [file] [log] [blame]
Chris Wilson50f00332016-12-22 08:36:09 +00001/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
/* Module-level test tunables: PRNG seed and iteration/prime bounds. */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;
23
/* Indices into insert_modes[] for the named allocation strategies. */
enum {
	DEFAULT,
	TOPDOWN,
	BEST,
};

/*
 * Search/create flag combinations exercised by the insertion tests;
 * the empty {} entry is the iteration sentinel. evict_modes[] is the
 * subset used by the eviction tests.
 */
static const struct insert_mode {
	const char *name;
	unsigned int search_flags;
	unsigned int create_flags;
} insert_modes[] = {
	[DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
	{}
}, evict_modes[] = {
	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
	{}
};
44
/* Trivial first test: proves the selftest harness itself runs. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
50
Chris Wilson393b50f2016-12-22 08:36:10 +000051static bool assert_no_holes(const struct drm_mm *mm)
52{
53 struct drm_mm_node *hole;
54 u64 hole_start, hole_end;
55 unsigned long count;
56
57 count = 0;
58 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
59 count++;
60 if (count) {
61 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
62 return false;
63 }
64
65 drm_mm_for_each_node(hole, mm) {
66 if (hole->hole_follows) {
67 pr_err("Hole follows node, expected none!\n");
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
76{
77 struct drm_mm_node *hole;
78 u64 hole_start, hole_end;
79 unsigned long count;
80 bool ok = true;
81
82 if (end <= start)
83 return true;
84
85 count = 0;
86 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
87 if (start != hole_start || end != hole_end) {
88 if (ok)
89 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
90 hole_start, hole_end,
91 start, end);
92 ok = false;
93 }
94 count++;
95 }
96 if (count != 1) {
97 pr_err("Expected to find one hole, found %lu instead\n", count);
98 ok = false;
99 }
100
101 return ok;
102}
103
/*
 * Verify the drm_mm consists solely of back-to-back nodes of @size
 * starting at address 0: no holes, addresses in list order, and each
 * node findable via a range lookup over its own extent.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (node->hole_follows) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range walk over [addr, addr+size) must hit this node
		 * and only this node.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/* Basic sanity checks of drm_mm_init/reserve/remove on a fresh manager. */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first, so drm_mm_init() must set every field itself. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_takedown(&mm);
	return ret;
}
257
static int igt_debug(void *ignored)
{
	struct drm_mm mm;
	struct drm_mm_node nodes[2];
	int ret;

	/* Create a small drm_mm with a couple of nodes and a few holes, and
	 * check that the debug iterator doesn't explode over a trivial drm_mm.
	 */

	drm_mm_init(&mm, 0, 4096);

	memset(nodes, 0, sizeof(nodes));
	nodes[0].start = 512;
	nodes[0].size = 1024;
	ret = drm_mm_reserve_node(&mm, &nodes[0]);
	if (ret) {
		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
		       nodes[0].start, nodes[0].size);
		return ret;
	}

	nodes[1].size = 1024;
	nodes[1].start = 4096 - 512 - nodes[1].size;
	ret = drm_mm_reserve_node(&mm, &nodes[1]);
	if (ret) {
		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
		       nodes[1].start, nodes[1].size);
		return ret;
	}

	/* NOTE(review): unlike igt_init(), the nodes are never removed and
	 * drm_mm_takedown() is never called — confirm this is intentional
	 * for a print-only smoke test.
	 */
	drm_mm_debug_table(&mm, __func__);
	return 0;
}
292
Chris Wilson900537d2016-12-22 08:36:12 +0000293static struct drm_mm_node *set_node(struct drm_mm_node *node,
294 u64 start, u64 size)
295{
296 node->start = start;
297 node->size = size;
298 return node;
299}
300
301static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
302{
303 int err;
304
305 err = drm_mm_reserve_node(mm, node);
306 if (likely(err == -ENOSPC))
307 return true;
308
309 if (!err) {
310 pr_err("impossible reserve succeeded, node %llu + %llu\n",
311 node->start, node->size);
312 drm_mm_remove_node(node);
313 } else {
314 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
315 err, -ENOSPC, node->start, node->size);
316 }
317 return false;
318}
319
/*
 * Try a battery of impossible reservations at/around the boundaries of
 * a drm_mm spanning [0, count * size): every one must fail with
 * -ENOSPC. Negated start/size values rely on u64 wraparound to probe
 * overflow handling.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
364
365static int __igt_reserve(unsigned int count, u64 size)
366{
367 DRM_RND_STATE(prng, random_seed);
368 struct drm_mm mm;
369 struct drm_mm_node tmp, *nodes, *node, *next;
370 unsigned int *order, n, m, o = 0;
371 int ret, err;
372
373 /* For exercising drm_mm_reserve_node(), we want to check that
374 * reservations outside of the drm_mm range are rejected, and to
375 * overlapping and otherwise already occupied ranges. Afterwards,
376 * the tree and nodes should be intact.
377 */
378
379 DRM_MM_BUG_ON(!count);
380 DRM_MM_BUG_ON(!size);
381
382 ret = -ENOMEM;
383 order = drm_random_order(count, &prng);
384 if (!order)
385 goto err;
386
387 nodes = vzalloc(sizeof(*nodes) * count);
388 if (!nodes)
389 goto err_order;
390
391 ret = -EINVAL;
392 drm_mm_init(&mm, 0, count * size);
393
394 if (!check_reserve_boundaries(&mm, count, size))
395 goto out;
396
397 for (n = 0; n < count; n++) {
398 nodes[n].start = order[n] * size;
399 nodes[n].size = size;
400
401 err = drm_mm_reserve_node(&mm, &nodes[n]);
402 if (err) {
403 pr_err("reserve failed, step %d, start %llu\n",
404 n, nodes[n].start);
405 ret = err;
406 goto out;
407 }
408
409 if (!drm_mm_node_allocated(&nodes[n])) {
410 pr_err("reserved node not allocated! step %d, start %llu\n",
411 n, nodes[n].start);
412 goto out;
413 }
414
415 if (!expect_reserve_fail(&mm, &nodes[n]))
416 goto out;
417 }
418
419 /* After random insertion the nodes should be in order */
420 if (!assert_continuous(&mm, size))
421 goto out;
422
423 /* Repeated use should then fail */
424 drm_random_reorder(order, count, &prng);
425 for (n = 0; n < count; n++) {
426 if (!expect_reserve_fail(&mm,
427 set_node(&tmp, order[n] * size, 1)))
428 goto out;
429
430 /* Remove and reinsert should work */
431 drm_mm_remove_node(&nodes[order[n]]);
432 err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
433 if (err) {
434 pr_err("reserve failed, step %d, start %llu\n",
435 n, nodes[n].start);
436 ret = err;
437 goto out;
438 }
439 }
440
441 if (!assert_continuous(&mm, size))
442 goto out;
443
444 /* Overlapping use should then fail */
445 for (n = 0; n < count; n++) {
446 if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
447 goto out;
448 }
449 for (n = 0; n < count; n++) {
450 if (!expect_reserve_fail(&mm,
451 set_node(&tmp,
452 size * n,
453 size * (count - n))))
454 goto out;
455 }
456
457 /* Remove several, reinsert, check full */
458 for_each_prime_number(n, min(max_prime, count)) {
459 for (m = 0; m < n; m++) {
460 node = &nodes[order[(o + m) % count]];
461 drm_mm_remove_node(node);
462 }
463
464 for (m = 0; m < n; m++) {
465 node = &nodes[order[(o + m) % count]];
466 err = drm_mm_reserve_node(&mm, node);
467 if (err) {
468 pr_err("reserve failed, step %d/%d, start %llu\n",
469 m, n, node->start);
470 ret = err;
471 goto out;
472 }
473 }
474
475 o += n;
476
477 if (!assert_continuous(&mm, size))
478 goto out;
479 }
480
481 ret = 0;
482out:
483 drm_mm_for_each_node_safe(node, next, &mm)
484 drm_mm_remove_node(node);
485 drm_mm_takedown(&mm);
486 vfree(nodes);
487err_order:
488 kfree(order);
489err:
490 return ret;
491}
492
493static int igt_reserve(void *ignored)
494{
495 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
496 int n, ret;
497
498 for_each_prime_number_from(n, 1, 54) {
499 u64 size = BIT_ULL(n);
500
501 ret = __igt_reserve(count, size - 1);
502 if (ret)
503 return ret;
504
505 ret = __igt_reserve(count, size);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size + 1);
510 if (ret)
511 return ret;
512 }
513
514 return 0;
515}
516
/*
 * Insert a node with the given size/alignment/color using @mode's
 * search/create flags, then verify the placed node matches the request.
 * Logs and returns false on failure; a node that fails verification is
 * removed again before returning.
 */
static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
			  u64 size, u64 alignment, unsigned long color,
			  const struct insert_mode *mode)
{
	int err;

	err = drm_mm_insert_node_generic(mm, node,
					 size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
		       size, alignment, color, mode->name, err);
		return false;
	}

	if (!assert_node(node, mm, size, alignment, color)) {
		drm_mm_remove_node(node);
		return false;
	}

	return true;
}
540
541static bool expect_insert_fail(struct drm_mm *mm, u64 size)
542{
543 struct drm_mm_node tmp = {};
544 int err;
545
546 err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
547 if (likely(err == -ENOSPC))
548 return true;
549
550 if (!err) {
551 pr_err("impossible insert succeeded, node %llu + %llu\n",
552 tmp.start, tmp.size);
553 drm_mm_remove_node(&tmp);
554 } else {
555 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
556 err, -ENOSPC, size);
557 }
558 return false;
559}
560
/*
 * Fill the whole drm_mm with @count nodes of @size for every insertion
 * mode, then check ordering, refill-after-remove and failure once full.
 * With @replace, each insertion goes through a scratch node that is
 * then swapped into place with drm_mm_replace_node().
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert via a scratch node first. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				/* The replacement must inherit the original's
				 * size/alignment/color...
				 */
				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				/* ...and occupy exactly the same location. */
				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start each mode from an empty drm_mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
692
693static int igt_insert(void *ignored)
694{
695 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
696 unsigned int n;
697 int ret;
698
699 for_each_prime_number_from(n, 1, 54) {
700 u64 size = BIT_ULL(n);
701
Chris Wilson2bd966d2016-12-22 08:36:14 +0000702 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000703 if (ret)
704 return ret;
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size + 1, false);
711 }
712
713 return 0;
714}
715
716static int igt_replace(void *ignored)
717{
718 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
719 unsigned int n;
720 int ret;
721
722 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
723 * then replacing it with the intended node. We want to check that
724 * the tree is intact and all the information we need is carried
725 * across to the target node.
726 */
727
728 for_each_prime_number_from(n, 1, 54) {
729 u64 size = BIT_ULL(n);
730
731 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000732 if (ret)
733 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000734
735 ret = __igt_insert(count, size, true);
736 if (ret)
737 return ret;
738
739 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000740 }
741
742 return 0;
743}
744
Chris Wilson2fba0de2016-12-22 08:36:15 +0000745static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
746 u64 size, u64 alignment, unsigned long color,
747 u64 range_start, u64 range_end,
748 const struct insert_mode *mode)
749{
750 int err;
751
752 err = drm_mm_insert_node_in_range_generic(mm, node,
753 size, alignment, color,
754 range_start, range_end,
755 mode->search_flags,
756 mode->create_flags);
757 if (err) {
758 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
759 size, alignment, color, mode->name,
760 range_start, range_end, err);
761 return false;
762 }
763
764 if (!assert_node(node, mm, size, alignment, color)) {
765 drm_mm_remove_node(node);
766 return false;
767 }
768
769 return true;
770}
771
772static bool expect_insert_in_range_fail(struct drm_mm *mm,
773 u64 size,
774 u64 range_start,
775 u64 range_end)
776{
777 struct drm_mm_node tmp = {};
778 int err;
779
780 err = drm_mm_insert_node_in_range_generic(mm, &tmp,
781 size, 0, 0,
782 range_start, range_end,
783 DRM_MM_SEARCH_DEFAULT,
784 DRM_MM_CREATE_DEFAULT);
785 if (likely(err == -ENOSPC))
786 return true;
787
788 if (!err) {
789 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
790 tmp.start, tmp.size, range_start, range_end);
791 drm_mm_remove_node(&tmp);
792 } else {
793 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
794 err, -ENOSPC, size, range_start, range_end);
795 }
796
797 return false;
798}
799
/*
 * After a range-restricted fill, verify the nodes exactly tile
 * [start, end): all of @size, in address order, no holes inside the
 * range and no nodes outside it.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* The range must already be full: one more insert has to fail. */
	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first node that fits wholly inside the range. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A trailing hole is only legitimate past the range end. */
		if (node->hole_follows && drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* Nothing may live below the range... */
	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	/* ...nor above it. */
	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
857
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First/last node index that fits entirely inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			/* alignment == size pins node n at address n * size */
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start each mode from an empty drm_mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
943
944static int insert_outside_range(void)
945{
946 struct drm_mm mm;
947 const unsigned int start = 1024;
948 const unsigned int end = 2048;
949 const unsigned int size = end - start;
950
951 drm_mm_init(&mm, start, size);
952
953 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
954 return -EINVAL;
955
956 if (!expect_insert_in_range_fail(&mm, size,
957 start - size/2, start + (size+1)/2))
958 return -EINVAL;
959
960 if (!expect_insert_in_range_fail(&mm, size,
961 end - (size+1)/2, end + size/2))
962 return -EINVAL;
963
964 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
965 return -EINVAL;
966
967 drm_mm_takedown(&mm);
968 return 0;
969}
970
971static int igt_insert_range(void *ignored)
972{
973 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
974 unsigned int n;
975 int ret;
976
977 /* Check that requests outside the bounds of drm_mm are rejected. */
978 ret = insert_outside_range();
979 if (ret)
980 return ret;
981
982 for_each_prime_number_from(n, 1, 50) {
983 const u64 size = BIT_ULL(n);
984 const u64 max = count * size;
985
986 ret = __igt_insert_range(count, size, 0, max);
987 if (ret)
988 return ret;
989
990 ret = __igt_insert_range(count, size, 1, max);
991 if (ret)
992 return ret;
993
994 ret = __igt_insert_range(count, size, 0, max - 1);
995 if (ret)
996 return ret;
997
998 ret = __igt_insert_range(count, size, 0, max/2);
999 if (ret)
1000 return ret;
1001
1002 ret = __igt_insert_range(count, size, max/2, max);
1003 if (ret)
1004 return ret;
1005
1006 ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1007 if (ret)
1008 return ret;
1009 }
1010
1011 return 0;
1012}
1013
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(max_count * sizeof(*nodes));
	if (!nodes)
		goto err;

	/* Start at 1 so that alignment is non-trivial from the outset. */
	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		/* Alignment is the prime; size the next prime up. */
		for_each_prime_number_from(prime, 1, max_count) {
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Start each mode from an empty drm_mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1065
/*
 * Check every power-of-two alignment up to bit (max - 1), inserting a
 * node of just over half the alignment each time so the constraint
 * actually bites. Nodes are heap-allocated and freed on the way out.
 */
static int igt_align_pot(int max)
{
	struct drm_mm mm;
	struct drm_mm_node *node, *next;
	int bit;
	int ret = -EINVAL;

	/* Check that we can align to the full u64 address space */

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (bit = max - 1; bit; bit--) {
		u64 align, size;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		align = BIT_ULL(bit);
		size = BIT_ULL(bit-1) + 1;
		if (!expect_insert(&mm, node,
				   size, align, bit,
				   &insert_modes[0])) {
			pr_err("insert failed with alignment=%llx [%d]",
			       align, bit);
			goto out;
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1106
/* Power-of-two alignment checks across a 32-bit address space. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1111
/* Power-of-two alignment checks across the full 64-bit address space. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1116
/* Debug aid: dump an eviction scan's hit range and parameters. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1123
/*
 * Debug aid: print up to @count holes, each with its allocated
 * neighbouring nodes, for diagnosing failed eviction scans.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		/* Describe each allocated neighbour; the strings stay NULL
		 * for unallocated neighbours (or on kasprintf failure) and
		 * are then rendered by printk's %s handling.
		 */
		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1155
/* A drm_mm node plus a list link so eviction tests can track victims. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1160
/*
 * Build an eviction set: feed nodes into @scan (in @order if given)
 * until the scan is satisfied, prune the nodes the scan does not need,
 * then remove the chosen victims from the drm_mm. Returns false if the
 * scan never found a suitable hole.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop candidates the scan decided it does not need to evict. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	return true;
}
1191
/*
 * Run a full eviction scan and then abort it (remove every block
 * without evicting anything): afterwards every node must still be
 * allocated and still linked into the drm_mm, i.e. an aborted scan
 * leaves no side effects.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Abort: back every block out again, evicting nothing. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Repurpose link.next as a seen-marker for the check below. */
		e->link.next = NULL;
	}

	/* Mark every node still reachable via the drm_mm iterator... */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* ...then any unmarked node has fallen out of the drm_mm. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1238
1239static bool evict_everything(struct drm_mm *mm,
1240 unsigned int total_size,
1241 struct evict_node *nodes)
1242{
Chris Wilson9a71e272016-12-22 08:36:29 +00001243 struct drm_mm_scan scan;
Chris Wilson560b3282016-12-22 08:36:17 +00001244 LIST_HEAD(evict_list);
1245 struct evict_node *e;
1246 unsigned int n;
1247 int err;
1248
Chris Wilson9a71e272016-12-22 08:36:29 +00001249 drm_mm_scan_init(&scan, mm, total_size, 0, 0);
Chris Wilson560b3282016-12-22 08:36:17 +00001250 for (n = 0; n < total_size; n++) {
1251 e = &nodes[n];
1252 list_add(&e->link, &evict_list);
Chris Wilson9a71e272016-12-22 08:36:29 +00001253 if (drm_mm_scan_add_block(&scan, &e->node))
1254 break;
Chris Wilson560b3282016-12-22 08:36:17 +00001255 }
1256 list_for_each_entry(e, &evict_list, link) {
Chris Wilson9a71e272016-12-22 08:36:29 +00001257 if (!drm_mm_scan_remove_block(&scan, &e->node)) {
Chris Wilson560b3282016-12-22 08:36:17 +00001258 pr_err("Node %lld not marked for eviction!\n",
1259 e->node.start);
1260 list_del(&e->link);
1261 }
1262 }
1263
1264 list_for_each_entry(e, &evict_list, link)
1265 drm_mm_remove_node(&e->node);
1266
1267 if (!assert_one_hole(mm, 0, total_size))
1268 return false;
1269
1270 list_for_each_entry(e, &evict_list, link) {
1271 err = drm_mm_reserve_node(mm, &e->node);
1272 if (err) {
1273 pr_err("Failed to reinsert node after eviction: start=%llx\n",
1274 e->node.start);
1275 return false;
1276 }
1277 }
1278
1279 return assert_continuous(mm, nodes[0].node.size);
1280}
1281
/* Run an eviction scan for a hole of @size/@alignment within
 * [@range_start, @range_end), evict the victims, insert a temporary node
 * into the freed hole and verify it lands exactly where requested, then
 * restore the evicted nodes. Returns 0 on success or a negative errno.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end);
	if (!evict_nodes(&scan,
			 nodes, order, count,
			 &evict_list))
		return -EINVAL;

	/* The hole freed by the eviction must now satisfy this request. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* Accumulate failures in err so cleanup below still runs. */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The node must fill the hole exactly: no trailing hole allowed. */
	if (!assert_node(&tmp, mm, size, alignment, 0) || tmp.hole_follows) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, tmp.hole_follows);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Reinstate the evicted nodes at their original positions. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1351
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Completely fill the mm with unit-sized nodes, one per slot. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sized requests, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed half-size request with power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments, size shrunk so the request still fits. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1460
static int igt_evict_range(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const unsigned int range_size = size / 2;
	const unsigned int range_start = size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Like igt_evict() but now we are limiting the search to a
	 * small portion of the full drm_mm.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the whole mm; evictions are then restricted to the range. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* NOTE(review): unlike igt_evict(), the failure paths below return
	 * the generic -EINVAL instead of propagating err — confirm whether
	 * ret = err was intended here too.
	 */
	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes up to the restricted range. */
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
				       mode->name, n, range_start, range_end);
				goto out;
			}
		}

		/* Fixed half-range size with power-of-two alignments. */
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      range_size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, range_size/2, n, range_start, range_end);
				goto out;
			}
		}

		/* Prime alignments, size shrunk so it still fits the range. */
		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1556
Chris Wilson05ab3c22016-12-22 08:36:19 +00001557static unsigned int node_index(const struct drm_mm_node *node)
1558{
1559 return div64_u64(node->start, node->size);
1560}
1561
static int igt_topdown(void *ignored)
{
	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap = NULL;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* When allocating top-down, we expect to be returned a node
	 * from a suitable hole at the top of the drm_mm. We check that
	 * the returned node does match the highest available slot.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	/* NOTE(review): this rounds down and would under-allocate if count
	 * were not a multiple of BITS_PER_LONG (8192 is on all arches).
	 */
	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
			 GFP_TEMPORARY);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		/* Fill the mm from the top: after each insert there must be
		 * no hole above the node and a single hole (the remainder)
		 * below it.
		 */
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   topdown)) {
				pr_err("insert failed, size %u step %d\n", size, n);
				goto out;
			}

			if (nodes[n].hole_follows) {
				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
				       n, nodes[n].start, size);
				goto out;
			}

			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Punch out random sets of nodes (tracked in the bitmap by
		 * slot index), then check each topdown reinsertion lands in
		 * the highest vacated slot.
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int last;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   topdown)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				if (node->hole_follows) {
					pr_err("hole after topdown insert %d/%d, start=%llx\n",
					       m, n, node->start);
					goto out;
				}

				/* Highest free slot must be the one filled. */
				last = find_last_bit(bitmap, count);
				if (node_index(node) != last) {
					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
					       m, n, size, last, node_index(node));
					goto out;
				}

				__clear_bit(last, bitmap);
			}

			/* All vacated slots must have been refilled. */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	kfree(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1675
Chris Wilson4c2ba552016-12-22 08:36:20 +00001676static void separate_adjacent_colors(const struct drm_mm_node *node,
1677 unsigned long color,
1678 u64 *start,
1679 u64 *end)
1680{
1681 if (node->allocated && node->color != color)
1682 ++*start;
1683
1684 node = list_next_entry(node, node_list);
1685 if (node->allocated && node->color != color)
1686 --*end;
1687}
1688
1689static bool colors_abutt(const struct drm_mm_node *node)
1690{
1691 if (!node->hole_follows &&
1692 list_next_entry(node, node_list)->allocated) {
1693 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1694 node->color, node->start, node->size,
1695 list_next_entry(node, node_list)->color,
1696 list_next_entry(node, node_list)->start,
1697 list_next_entry(node, node_list)->size);
1698 return true;
1699 }
1700
1701 return false;
1702}
1703
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no color_adjust yet; insert nodes with color == size
	 * and verify the color is stored verbatim on each node.
	 */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		/* Anchor node: reserved directly, bypassing color_adjust. */
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		/* Phase 2: reserving flush against the previous node must
		 * be rejected (-ENOSPC) by the color callback; after adding
		 * a gap and re-aligning, the reserve must succeed.
		 */
		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Leave a gap and realign to the node's size. */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Phase 3: ordinary insertion must also honour the colors. */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify: colors stored, no abutting nodes, alignment kept. */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1858
/* Like evict_something(), but with a color: run a range-restricted
 * eviction scan for @size/@alignment/@color, insert a temporary node into
 * the freed hole, check placement, range and color constraints, then
 * restore the evicted nodes. Returns 0 on success or a negative errno.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end);
	if (!evict_nodes(&scan,
			 nodes, order, count,
			 &evict_list))
		return -EINVAL;

	/* The freed hole must satisfy the same colored request. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* Accumulate failures in err so cleanup below still runs. */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* color_adjust must have kept a gap to differently colored nodes. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Reinstate the evicted nodes at their original positions. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1926
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	/* The mm is 2*total_size - 1 so that unit nodes separated by unit
	 * holes (forced by the color callback) exactly fill the space.
	 */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	/* NOTE(review): failure paths below return -EINVAL rather than
	 * propagating err (igt_evict() copies err into ret) — confirm.
	 */
	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sized colored requests, no alignment. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed half-size request with power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments, size shrunk so the request still fits. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}
	}

	ret = 0;
out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2024
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002025static int igt_color_evict_range(void *ignored)
2026{
2027 DRM_RND_STATE(prng, random_seed);
2028 const unsigned int total_size = 8192;
2029 const unsigned int range_size = total_size / 2;
2030 const unsigned int range_start = total_size / 4;
2031 const unsigned int range_end = range_start + range_size;
2032 const struct insert_mode *mode;
2033 unsigned long color = 0;
2034 struct drm_mm mm;
2035 struct evict_node *nodes;
2036 struct drm_mm_node *node, *next;
2037 unsigned int *order, n;
2038 int ret, err;
2039
2040 /* Like igt_color_evict(), but limited to small portion of the full
2041 * drm_mm range.
2042 */
2043
2044 ret = -ENOMEM;
2045 nodes = vzalloc(total_size * sizeof(*nodes));
2046 if (!nodes)
2047 goto err;
2048
2049 order = drm_random_order(total_size, &prng);
2050 if (!order)
2051 goto err_nodes;
2052
2053 ret = -EINVAL;
2054 drm_mm_init(&mm, 0, 2*total_size - 1);
2055 mm.color_adjust = separate_adjacent_colors;
2056 for (n = 0; n < total_size; n++) {
2057 if (!expect_insert(&mm, &nodes[n].node,
2058 1, 0, color++,
2059 &insert_modes[0])) {
2060 pr_err("insert failed, step %d\n", n);
2061 goto out;
2062 }
2063 }
2064
2065 for (mode = evict_modes; mode->name; mode++) {
2066 for (n = 1; n <= range_size; n <<= 1) {
2067 drm_random_reorder(order, range_size, &prng);
2068 err = evict_color(&mm, range_start, range_end,
2069 nodes, order, total_size,
2070 n, 1, color++,
2071 mode);
2072 if (err) {
2073 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2074 mode->name, n, range_start, range_end);
2075 goto out;
2076 }
2077 }
2078
2079 for (n = 1; n < range_size; n <<= 1) {
2080 drm_random_reorder(order, total_size, &prng);
2081 err = evict_color(&mm, range_start, range_end,
2082 nodes, order, total_size,
2083 range_size/2, n, color++,
2084 mode);
2085 if (err) {
2086 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2087 mode->name, total_size/2, n, range_start, range_end);
2088 goto out;
2089 }
2090 }
2091
2092 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2093 unsigned int nsize = (range_size - n + 1) / 2;
2094
2095 DRM_MM_BUG_ON(!nsize);
2096
2097 drm_random_reorder(order, total_size, &prng);
2098 err = evict_color(&mm, range_start, range_end,
2099 nodes, order, total_size,
2100 nsize, n, color++,
2101 mode);
2102 if (err) {
2103 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2104 mode->name, nsize, n, range_start, range_end);
2105 goto out;
2106 }
2107 }
2108 }
2109
2110 ret = 0;
2111out:
2112 if (ret)
2113 drm_mm_debug_table(&mm, __func__);
2114 drm_mm_for_each_node_safe(node, next, &mm)
2115 drm_mm_remove_node(node);
2116 drm_mm_takedown(&mm);
2117 kfree(order);
2118err_nodes:
2119 vfree(nodes);
2120err:
2121 return ret;
2122}
2123
Chris Wilson50f00332016-12-22 08:36:09 +00002124#include "drm_selftest.c"
2125
2126static int __init test_drm_mm_init(void)
2127{
2128 int err;
2129
2130 while (!random_seed)
2131 random_seed = get_random_int();
2132
2133 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2134 random_seed, max_iterations, max_prime);
2135 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2136
2137 return err > 0 ? 0 : err;
2138}
2139
/* Nothing to tear down: every test cleans up its own drm_mm. An (empty)
 * exit hook is still provided so the module can be unloaded.
 */
static void __exit test_drm_mm_exit(void)
{
}
2143
2144module_init(test_drm_mm_init);
2145module_exit(test_drm_mm_exit);
2146
2147module_param(random_seed, uint, 0400);
2148module_param(max_iterations, uint, 0400);
2149module_param(max_prime, uint, 0400);
2150
2151MODULE_AUTHOR("Intel Corporation");
2152MODULE_LICENSE("GPL");