blob: 1bbfc24342c547a167eb6ba164683fd476e96245 [file] [log] [blame]
/*
 * Test cases for the drm_mm range manager
 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
20static unsigned int random_seed;
21static unsigned int max_iterations = 8192;
22static unsigned int max_prime = 128;
23
Chris Wilson78866922016-12-22 08:36:13 +000024enum {
25 DEFAULT,
26 TOPDOWN,
27 BEST,
28};
29
30static const struct insert_mode {
31 const char *name;
32 unsigned int search_flags;
33 unsigned int create_flags;
34} insert_modes[] = {
35 [DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
36 [TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
37 [BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
38 {}
Chris Wilson560b3282016-12-22 08:36:17 +000039}, evict_modes[] = {
40 { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
41 { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
42 {}
Chris Wilson78866922016-12-22 08:36:13 +000043};
44
Chris Wilson50f00332016-12-22 08:36:09 +000045static int igt_sanitycheck(void *ignored)
46{
47 pr_info("%s - ok!\n", __func__);
48 return 0;
49}
50
Chris Wilson393b50f2016-12-22 08:36:10 +000051static bool assert_no_holes(const struct drm_mm *mm)
52{
53 struct drm_mm_node *hole;
54 u64 hole_start, hole_end;
55 unsigned long count;
56
57 count = 0;
58 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
59 count++;
60 if (count) {
61 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
62 return false;
63 }
64
65 drm_mm_for_each_node(hole, mm) {
66 if (hole->hole_follows) {
67 pr_err("Hole follows node, expected none!\n");
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
76{
77 struct drm_mm_node *hole;
78 u64 hole_start, hole_end;
79 unsigned long count;
80 bool ok = true;
81
82 if (end <= start)
83 return true;
84
85 count = 0;
86 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
87 if (start != hole_start || end != hole_end) {
88 if (ok)
89 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
90 hole_start, hole_end,
91 start, end);
92 ok = false;
93 }
94 count++;
95 }
96 if (count != 1) {
97 pr_err("Expected to find one hole, found %lu instead\n", count);
98 ok = false;
99 }
100
101 return ok;
102}
103
/*
 * Check that @mm is one contiguous run of equally sized nodes: no holes,
 * nodes in ascending address order starting at 0, every node exactly
 * @size bytes, and every node findable via the range lookup over its span.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (node->hole_follows) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range iterator over [addr, addr+size) must return this
		 * node and only this node.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
154
Chris Wilson78866922016-12-22 08:36:13 +0000155static u64 misalignment(struct drm_mm_node *node, u64 alignment)
156{
157 u64 rem;
158
159 if (!alignment)
160 return 0;
161
162 div64_u64_rem(node->start, alignment, &rem);
163 return rem;
164}
165
166static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
167 u64 size, u64 alignment, unsigned long color)
168{
169 bool ok = true;
170
171 if (!drm_mm_node_allocated(node) || node->mm != mm) {
172 pr_err("node not allocated\n");
173 ok = false;
174 }
175
176 if (node->size != size) {
177 pr_err("node has wrong size, found %llu, expected %llu\n",
178 node->size, size);
179 ok = false;
180 }
181
182 if (misalignment(node, alignment)) {
183 pr_err("node is misalinged, start %llx rem %llu, expected alignment %llu\n",
184 node->start, misalignment(node, alignment), alignment);
185 ok = false;
186 }
187
188 if (node->color != color) {
189 pr_err("node has wrong color, found %lu, expected %lu\n",
190 node->color, color);
191 ok = false;
192 }
193
194 return ok;
195}
196
/*
 * Basic lifecycle checks for struct drm_mm: the initialized/clean
 * predicates, a whole-range reservation, removal, and takedown.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison the struct first so drm_mm_init() must set every field */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_takedown(&mm);
	return ret;
}
257
Chris Wilson06df8ac2016-12-22 08:36:11 +0000258static int igt_debug(void *ignored)
259{
260 struct drm_mm mm;
261 struct drm_mm_node nodes[2];
262 int ret;
263
264 /* Create a small drm_mm with a couple of nodes and a few holes, and
265 * check that the debug iterator doesn't explode over a trivial drm_mm.
266 */
267
268 drm_mm_init(&mm, 0, 4096);
269
270 memset(nodes, 0, sizeof(nodes));
271 nodes[0].start = 512;
272 nodes[0].size = 1024;
273 ret = drm_mm_reserve_node(&mm, &nodes[0]);
274 if (ret) {
275 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
276 nodes[0].start, nodes[0].size);
277 return ret;
278 }
279
280 nodes[1].size = 1024;
281 nodes[1].start = 4096 - 512 - nodes[1].size;
282 ret = drm_mm_reserve_node(&mm, &nodes[1]);
283 if (ret) {
284 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
285 nodes[1].start, nodes[1].size);
286 return ret;
287 }
288
289 drm_mm_debug_table(&mm, __func__);
290 return 0;
291}
292
Chris Wilson900537d2016-12-22 08:36:12 +0000293static struct drm_mm_node *set_node(struct drm_mm_node *node,
294 u64 start, u64 size)
295{
296 node->start = start;
297 node->size = size;
298 return node;
299}
300
301static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
302{
303 int err;
304
305 err = drm_mm_reserve_node(mm, node);
306 if (likely(err == -ENOSPC))
307 return true;
308
309 if (!err) {
310 pr_err("impossible reserve succeeded, node %llu + %llu\n",
311 node->start, node->size);
312 drm_mm_remove_node(node);
313 } else {
314 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
315 err, -ENOSPC, node->start, node->size);
316 }
317 return false;
318}
319
/*
 * Probe reservations around the edges of the space managed by @mm (which
 * spans [0, count * size)). Every entry is invalid — empty, out of range,
 * or overflowing — so each must be rejected. Note the deliberate u64
 * wrap-around: negative offsets such as -size become huge values probing
 * the top of the address space.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
/* B() stringifies its arguments so failures name the offending entry */
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
364
/*
 * Exercise drm_mm_reserve_node() over a drm_mm of @count slots of @size:
 * out-of-range and overlapping reservations must fail, while filling,
 * emptying and refilling in random order must leave the tree intact.
 */
static int __igt_reserve(unsigned int count, u64 size)
{
	DRM_RND_STATE(prng, random_seed);
	struct drm_mm mm;
	struct drm_mm_node tmp, *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret, err;

	/* For exercising drm_mm_reserve_node(), we want to check that
	 * reservations outside of the drm_mm range are rejected, and to
	 * overlapping and otherwise already occupied ranges. Afterwards,
	 * the tree and nodes should be intact.
	 */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	order = drm_random_order(count, &prng);
	if (!order)
		goto err;

	nodes = vzalloc(sizeof(*nodes) * count);
	if (!nodes)
		goto err_order;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	if (!check_reserve_boundaries(&mm, count, size))
		goto out;

	/* Fill every slot, in random order */
	for (n = 0; n < count; n++) {
		nodes[n].start = order[n] * size;
		nodes[n].size = size;

		err = drm_mm_reserve_node(&mm, &nodes[n]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}

		if (!drm_mm_node_allocated(&nodes[n])) {
			pr_err("reserved node not allocated! step %d, start %llu\n",
			       n, nodes[n].start);
			goto out;
		}

		/* Reserving the same node twice must be rejected */
		if (!expect_reserve_fail(&mm, &nodes[n]))
			goto out;
	}

	/* After random insertion the nodes should be in order */
	if (!assert_continuous(&mm, size))
		goto out;

	/* Repeated use should then fail */
	drm_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp, order[n] * size, 1)))
			goto out;

		/* Remove and reinsert should work */
		drm_mm_remove_node(&nodes[order[n]]);
		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
		if (err) {
			pr_err("reserve failed, step %d, start %llu\n",
			       n, nodes[n].start);
			ret = err;
			goto out;
		}
	}

	if (!assert_continuous(&mm, size))
		goto out;

	/* Overlapping use should then fail */
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
			goto out;
	}
	for (n = 0; n < count; n++) {
		if (!expect_reserve_fail(&mm,
					 set_node(&tmp,
						  size * n,
						  size * (count - n))))
			goto out;
	}

	/* Remove several, reinsert, check full */
	for_each_prime_number(n, min(max_prime, count)) {
		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			drm_mm_remove_node(node);
		}

		for (m = 0; m < n; m++) {
			node = &nodes[order[(o + m) % count]];
			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve failed, step %d/%d, start %llu\n",
				       m, n, node->start);
				ret = err;
				goto out;
			}
		}

		/* advance through the shuffled order on each prime batch */
		o += n;

		if (!assert_continuous(&mm, size))
			goto out;
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err_order:
	kfree(order);
err:
	return ret;
}
492
493static int igt_reserve(void *ignored)
494{
495 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
496 int n, ret;
497
498 for_each_prime_number_from(n, 1, 54) {
499 u64 size = BIT_ULL(n);
500
501 ret = __igt_reserve(count, size - 1);
502 if (ret)
503 return ret;
504
505 ret = __igt_reserve(count, size);
506 if (ret)
507 return ret;
508
509 ret = __igt_reserve(count, size + 1);
510 if (ret)
511 return ret;
512 }
513
514 return 0;
515}
516
Chris Wilson78866922016-12-22 08:36:13 +0000517static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
518 u64 size, u64 alignment, unsigned long color,
519 const struct insert_mode *mode)
520{
521 int err;
522
523 err = drm_mm_insert_node_generic(mm, node,
524 size, alignment, color,
525 mode->search_flags,
526 mode->create_flags);
527 if (err) {
528 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
529 size, alignment, color, mode->name, err);
530 return false;
531 }
532
533 if (!assert_node(node, mm, size, alignment, color)) {
534 drm_mm_remove_node(node);
535 return false;
536 }
537
538 return true;
539}
540
541static bool expect_insert_fail(struct drm_mm *mm, u64 size)
542{
543 struct drm_mm_node tmp = {};
544 int err;
545
546 err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT);
547 if (likely(err == -ENOSPC))
548 return true;
549
550 if (!err) {
551 pr_err("impossible insert succeeded, node %llu + %llu\n",
552 tmp.start, tmp.size);
553 drm_mm_remove_node(&tmp);
554 } else {
555 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
556 err, -ENOSPC, size);
557 }
558 return false;
559}
560
/*
 * Fill a drm_mm of @count slots of @size via drm_mm_insert_node_generic()
 * for every insertion mode, then check removal/reinsertion refills the
 * same slots. With @replace, each insertion goes through a temporary node
 * that is then swapped in via drm_mm_replace_node().
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert into the stack-local tmp
			 * first, then transfer it into nodes[n].
			 */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty drm_mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
692
693static int igt_insert(void *ignored)
694{
695 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
696 unsigned int n;
697 int ret;
698
699 for_each_prime_number_from(n, 1, 54) {
700 u64 size = BIT_ULL(n);
701
Chris Wilson2bd966d2016-12-22 08:36:14 +0000702 ret = __igt_insert(count, size - 1, false);
Chris Wilson78866922016-12-22 08:36:13 +0000703 if (ret)
704 return ret;
705
Chris Wilson2bd966d2016-12-22 08:36:14 +0000706 ret = __igt_insert(count, size, false);
Chris Wilson78866922016-12-22 08:36:13 +0000707 if (ret)
708 return ret;
709
Chris Wilson2bd966d2016-12-22 08:36:14 +0000710 ret = __igt_insert(count, size + 1, false);
711 }
712
713 return 0;
714}
715
716static int igt_replace(void *ignored)
717{
718 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
719 unsigned int n;
720 int ret;
721
722 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
723 * then replacing it with the intended node. We want to check that
724 * the tree is intact and all the information we need is carried
725 * across to the target node.
726 */
727
728 for_each_prime_number_from(n, 1, 54) {
729 u64 size = BIT_ULL(n);
730
731 ret = __igt_insert(count, size - 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000732 if (ret)
733 return ret;
Chris Wilson2bd966d2016-12-22 08:36:14 +0000734
735 ret = __igt_insert(count, size, true);
736 if (ret)
737 return ret;
738
739 ret = __igt_insert(count, size + 1, true);
Chris Wilson78866922016-12-22 08:36:13 +0000740 }
741
742 return 0;
743}
744
Chris Wilson2fba0de2016-12-22 08:36:15 +0000745static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
746 u64 size, u64 alignment, unsigned long color,
747 u64 range_start, u64 range_end,
748 const struct insert_mode *mode)
749{
750 int err;
751
752 err = drm_mm_insert_node_in_range_generic(mm, node,
753 size, alignment, color,
754 range_start, range_end,
755 mode->search_flags,
756 mode->create_flags);
757 if (err) {
758 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
759 size, alignment, color, mode->name,
760 range_start, range_end, err);
761 return false;
762 }
763
764 if (!assert_node(node, mm, size, alignment, color)) {
765 drm_mm_remove_node(node);
766 return false;
767 }
768
769 return true;
770}
771
772static bool expect_insert_in_range_fail(struct drm_mm *mm,
773 u64 size,
774 u64 range_start,
775 u64 range_end)
776{
777 struct drm_mm_node tmp = {};
778 int err;
779
780 err = drm_mm_insert_node_in_range_generic(mm, &tmp,
781 size, 0, 0,
782 range_start, range_end,
783 DRM_MM_SEARCH_DEFAULT,
784 DRM_MM_CREATE_DEFAULT);
785 if (likely(err == -ENOSPC))
786 return true;
787
788 if (!err) {
789 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
790 tmp.start, tmp.size, range_start, range_end);
791 drm_mm_remove_node(&tmp);
792 } else {
793 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
794 err, -ENOSPC, size, range_start, range_end);
795 }
796
797 return false;
798}
799
/*
 * Check that [start, end) is completely filled with @size-sized nodes in
 * address order (so a further insertion must fail), and that no node lies
 * outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* index of the first size-aligned slot at or above start */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* a trailing hole is only legitimate past the range end */
		if (node->hole_follows && drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* the range iterators must find nothing outside [start, end) */
	drm_mm_for_each_node_in_range(node, mm, 0, start) {
		if (node) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
		if (node) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
857
/*
 * Like __igt_insert(), but restricts insertions to the sub-range
 * [start, end) of a drm_mm spanning [0, count * size): fill the range,
 * verify it is contiguous, and check removal/reinsertion refills the
 * same slots, for every insertion mode.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* first and last size-aligned slot indices inside [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty drm_mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
943
944static int insert_outside_range(void)
945{
946 struct drm_mm mm;
947 const unsigned int start = 1024;
948 const unsigned int end = 2048;
949 const unsigned int size = end - start;
950
951 drm_mm_init(&mm, start, size);
952
953 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
954 return -EINVAL;
955
956 if (!expect_insert_in_range_fail(&mm, size,
957 start - size/2, start + (size+1)/2))
958 return -EINVAL;
959
960 if (!expect_insert_in_range_fail(&mm, size,
961 end - (size+1)/2, end + size/2))
962 return -EINVAL;
963
964 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
965 return -EINVAL;
966
967 drm_mm_takedown(&mm);
968 return 0;
969}
970
971static int igt_insert_range(void *ignored)
972{
973 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
974 unsigned int n;
975 int ret;
976
977 /* Check that requests outside the bounds of drm_mm are rejected. */
978 ret = insert_outside_range();
979 if (ret)
980 return ret;
981
982 for_each_prime_number_from(n, 1, 50) {
983 const u64 size = BIT_ULL(n);
984 const u64 max = count * size;
985
986 ret = __igt_insert_range(count, size, 0, max);
987 if (ret)
988 return ret;
989
990 ret = __igt_insert_range(count, size, 1, max);
991 if (ret)
992 return ret;
993
994 ret = __igt_insert_range(count, size, 0, max - 1);
995 if (ret)
996 return ret;
997
998 ret = __igt_insert_range(count, size, 0, max/2);
999 if (ret)
1000 return ret;
1001
1002 ret = __igt_insert_range(count, size, max/2, max);
1003 if (ret)
1004 return ret;
1005
1006 ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1007 if (ret)
1008 return ret;
1009 }
1010
1011 return 0;
1012}
1013
Chris Wilson9b26f2e2016-12-22 08:36:16 +00001014static int igt_align(void *ignored)
1015{
1016 const struct insert_mode *mode;
1017 const unsigned int max_count = min(8192u, max_prime);
1018 struct drm_mm mm;
1019 struct drm_mm_node *nodes, *node, *next;
1020 unsigned int prime;
1021 int ret = -EINVAL;
1022
1023 /* For each of the possible insertion modes, we pick a few
1024 * arbitrary alignments and check that the inserted node
1025 * meets our requirements.
1026 */
1027
1028 nodes = vzalloc(max_count * sizeof(*nodes));
1029 if (!nodes)
1030 goto err;
1031
1032 drm_mm_init(&mm, 1, U64_MAX - 2);
1033
1034 for (mode = insert_modes; mode->name; mode++) {
1035 unsigned int i = 0;
1036
1037 for_each_prime_number_from(prime, 1, max_count) {
1038 u64 size = next_prime_number(prime);
1039
1040 if (!expect_insert(&mm, &nodes[i],
1041 size, prime, i,
1042 mode)) {
1043 pr_err("%s insert failed with alignment=%d",
1044 mode->name, prime);
1045 goto out;
1046 }
1047
1048 i++;
1049 }
1050
1051 drm_mm_for_each_node_safe(node, next, &mm)
1052 drm_mm_remove_node(node);
1053 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1054 }
1055
1056 ret = 0;
1057out:
1058 drm_mm_for_each_node_safe(node, next, &mm)
1059 drm_mm_remove_node(node);
1060 drm_mm_takedown(&mm);
1061 vfree(nodes);
1062err:
1063 return ret;
1064}
1065
1066static int igt_align_pot(int max)
1067{
1068 struct drm_mm mm;
1069 struct drm_mm_node *node, *next;
1070 int bit;
1071 int ret = -EINVAL;
1072
1073 /* Check that we can align to the full u64 address space */
1074
1075 drm_mm_init(&mm, 1, U64_MAX - 2);
1076
1077 for (bit = max - 1; bit; bit--) {
1078 u64 align, size;
1079
1080 node = kzalloc(sizeof(*node), GFP_KERNEL);
1081 if (!node) {
1082 ret = -ENOMEM;
1083 goto out;
1084 }
1085
1086 align = BIT_ULL(bit);
1087 size = BIT_ULL(bit-1) + 1;
1088 if (!expect_insert(&mm, node,
1089 size, align, bit,
1090 &insert_modes[0])) {
1091 pr_err("insert failed with alignment=%llx [%d]",
1092 align, bit);
1093 goto out;
1094 }
1095 }
1096
1097 ret = 0;
1098out:
1099 drm_mm_for_each_node_safe(node, next, &mm) {
1100 drm_mm_remove_node(node);
1101 kfree(node);
1102 }
1103 drm_mm_takedown(&mm);
1104 return ret;
1105}
1106
1107static int igt_align32(void *ignored)
1108{
1109 return igt_align_pot(32);
1110}
1111
1112static int igt_align64(void *ignored)
1113{
1114 return igt_align_pot(64);
1115}
1116
Chris Wilson9a71e272016-12-22 08:36:29 +00001117static void show_scan(const struct drm_mm_scan *scan)
Chris Wilson560b3282016-12-22 08:36:17 +00001118{
Chris Wilson71733202016-12-22 08:36:24 +00001119 pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
Chris Wilson9a71e272016-12-22 08:36:29 +00001120 scan->hit_start, scan->hit_end,
1121 scan->size, scan->alignment, scan->color);
Chris Wilson560b3282016-12-22 08:36:17 +00001122}
1123
1124static void show_holes(const struct drm_mm *mm, int count)
1125{
1126 u64 hole_start, hole_end;
1127 struct drm_mm_node *hole;
1128
1129 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
1130 struct drm_mm_node *next = list_next_entry(hole, node_list);
1131 const char *node1 = NULL, *node2 = NULL;
1132
1133 if (hole->allocated)
1134 node1 = kasprintf(GFP_KERNEL,
1135 "[%llx + %lld, color=%ld], ",
1136 hole->start, hole->size, hole->color);
1137
1138 if (next->allocated)
1139 node2 = kasprintf(GFP_KERNEL,
1140 ", [%llx + %lld, color=%ld]",
1141 next->start, next->size, next->color);
1142
1143 pr_info("%sHole [%llx - %llx, size %lld]%s\n",
1144 node1,
1145 hole_start, hole_end, hole_end - hole_start,
1146 node2);
1147
1148 kfree(node2);
1149 kfree(node1);
1150
1151 if (!--count)
1152 break;
1153 }
1154}
1155
1156struct evict_node {
1157 struct drm_mm_node node;
1158 struct list_head link;
1159};
1160
/*
 * Feed up to @count nodes (optionally in the shuffled @order) into the
 * eviction @scan, stopping as soon as drm_mm_scan_add_block() reports a
 * suitable hole, then back every candidate out again. Nodes the scan
 * selects for eviction stay on @evict_list and are removed from the
 * drm_mm; the rest are dropped from the list. Returns false if the scan
 * found no eviction.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Roll back the scan; keep only the nodes it marked for eviction */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	return true;
}
1191
/*
 * Run a full eviction scan over every node but then abort it by removing
 * all blocks again: nothing may actually have been evicted, every node
 * must still be allocated and linked into the drm_mm, and the mm must
 * still be one continuous run.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Abort the scan: restore every block we fed into it */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* clear the membership marker (repurposing link.next) */
		e->link.next = NULL;
	}

	/* Mark every node still present in the drm_mm ... */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* ... and verify that covers all of them */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1238
/*
 * Check that scanning for a hole as large as the whole drm_mm marks every
 * node for eviction, that removing them all leaves exactly one hole
 * spanning the full range, and that the evicted nodes can afterwards be
 * reserved back into their original positions.
 *
 * Returns true on success, false (after pr_err) on failure.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	/* Request a hole covering the entire mm (each node has size 1). */
	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}
	/* Every block must be selected; drop any the scanner rejects. */
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			pr_err("Node %lld not marked for eviction!\n",
			       e->node.start);
			list_del(&e->link);
		}
	}

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Reinsert at the recorded offsets to restore the original layout. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1281
/*
 * Run one eviction cycle: scan [range_start, range_end] for a hole of
 * @size/@alignment, evict the chosen victims, insert a temporary node into
 * the freed hole and verify it lands exactly there, then remove it and
 * reserve all victims back so the mm is restored for the next iteration.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must be able to hold our request. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The node should fill the hole exactly, leaving no gap behind it. */
	if (!assert_node(&tmp, mm, size, alignment, 0) || tmp.hole_follows) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, tmp.hole_follows);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Restore the evicted nodes to their original positions. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1352
/*
 * Selftest entry point: exercise drm_mm_scan based eviction over the whole
 * address space. Returns 0 on success, negative errno on failure.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Fill the mm completely with unit-sized nodes. */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
					 DRM_MM_SEARCH_DEFAULT);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size (half the mm), power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with sizes scaled to still fit. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1461
Chris Wilson0e483252016-12-22 08:36:18 +00001462static int igt_evict_range(void *ignored)
1463{
1464 DRM_RND_STATE(prng, random_seed);
1465 const unsigned int size = 8192;
1466 const unsigned int range_size = size / 2;
1467 const unsigned int range_start = size / 4;
1468 const unsigned int range_end = range_start + range_size;
1469 const struct insert_mode *mode;
1470 struct drm_mm mm;
1471 struct evict_node *nodes;
1472 struct drm_mm_node *node, *next;
1473 unsigned int *order, n;
1474 int ret, err;
1475
1476 /* Like igt_evict() but now we are limiting the search to a
1477 * small portion of the full drm_mm.
1478 */
1479
1480 ret = -ENOMEM;
1481 nodes = vzalloc(size * sizeof(*nodes));
1482 if (!nodes)
1483 goto err;
1484
1485 order = drm_random_order(size, &prng);
1486 if (!order)
1487 goto err_nodes;
1488
1489 ret = -EINVAL;
1490 drm_mm_init(&mm, 0, size);
1491 for (n = 0; n < size; n++) {
1492 err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
1493 DRM_MM_SEARCH_DEFAULT);
1494 if (err) {
1495 pr_err("insert failed, step %d\n", n);
1496 ret = err;
1497 goto out;
1498 }
1499 }
1500
1501 for (mode = evict_modes; mode->name; mode++) {
1502 for (n = 1; n <= range_size; n <<= 1) {
1503 drm_random_reorder(order, size, &prng);
1504 err = evict_something(&mm, range_start, range_end,
1505 nodes, order, size,
1506 n, 1,
1507 mode);
1508 if (err) {
1509 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1510 mode->name, n, range_start, range_end);
1511 goto out;
1512 }
1513 }
1514
1515 for (n = 1; n <= range_size; n <<= 1) {
1516 drm_random_reorder(order, size, &prng);
1517 err = evict_something(&mm, range_start, range_end,
1518 nodes, order, size,
1519 range_size/2, n,
1520 mode);
1521 if (err) {
1522 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1523 mode->name, range_size/2, n, range_start, range_end);
1524 goto out;
1525 }
1526 }
1527
1528 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1529 unsigned int nsize = (range_size - n + 1) / 2;
1530
1531 DRM_MM_BUG_ON(!nsize);
1532
1533 drm_random_reorder(order, size, &prng);
1534 err = evict_something(&mm, range_start, range_end,
1535 nodes, order, size,
1536 nsize, n,
1537 mode);
1538 if (err) {
1539 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1540 mode->name, nsize, n, range_start, range_end);
1541 goto out;
1542 }
1543 }
1544 }
1545
1546 ret = 0;
1547out:
1548 drm_mm_for_each_node_safe(node, next, &mm)
1549 drm_mm_remove_node(node);
1550 drm_mm_takedown(&mm);
1551 kfree(order);
1552err_nodes:
1553 vfree(nodes);
1554err:
1555 return ret;
1556}
1557
Chris Wilson05ab3c22016-12-22 08:36:19 +00001558static unsigned int node_index(const struct drm_mm_node *node)
1559{
1560 return div64_u64(node->start, node->size);
1561}
1562
/*
 * Selftest entry point: verify DRM_MM_CREATE_TOP allocation. Every
 * top-down insertion must land in the highest available hole, which we
 * track with a bitmap of freed slots. Returns 0 on success, negative
 * errno on failure.
 */
static int igt_topdown(void *ignored)
{
	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap = NULL;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* When allocating top-down, we expect to be returned a node
	 * from a suitable hole at the top of the drm_mm. We check that
	 * the returned node does match the highest available slot.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	/*
	 * NOTE(review): this sizing truncates; it only allocates enough
	 * words because count (8192) is a multiple of BITS_PER_LONG —
	 * DIV_ROUND_UP would be safer if count ever changes. Confirm.
	 */
	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
			 GFP_TEMPORARY);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		/* Fill the mm from the bottom with uniform nodes of @size. */
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   topdown)) {
				pr_err("insert failed, size %u step %d\n", size, n);
				goto out;
			}

			/* Top-down placement must leave no hole after the node. */
			if (nodes[n].hole_follows) {
				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
				       n, nodes[n].start, size);
				goto out;
			}

			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			/* Free n randomly chosen slots, remembering which. */
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			/* Refill: each insert must take the topmost free slot. */
			for (m = 0; m < n; m++) {
				unsigned int last;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   topdown)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				if (node->hole_follows) {
					pr_err("hole after topdown insert %d/%d, start=%llx\n",
					       m, n, node->start);
					goto out;
				}

				last = find_last_bit(bitmap, count);
				if (node_index(node) != last) {
					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
					       m, n, size, last, node_index(node));
					goto out;
				}

				__clear_bit(last, bitmap);
			}

			/* All freed slots must have been consumed again. */
			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	kfree(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1676
Chris Wilson4c2ba552016-12-22 08:36:20 +00001677static void separate_adjacent_colors(const struct drm_mm_node *node,
1678 unsigned long color,
1679 u64 *start,
1680 u64 *end)
1681{
1682 if (node->allocated && node->color != color)
1683 ++*start;
1684
1685 node = list_next_entry(node, node_list);
1686 if (node->allocated && node->color != color)
1687 --*end;
1688}
1689
1690static bool colors_abutt(const struct drm_mm_node *node)
1691{
1692 if (!node->hole_follows &&
1693 list_next_entry(node, node_list)->allocated) {
1694 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1695 node->color, node->start, node->size,
1696 list_next_entry(node, node_list)->color,
1697 list_next_entry(node, node_list)->start,
1698 list_next_entry(node, node_list)->size);
1699 return true;
1700 }
1701
1702 return false;
1703}
1704
/*
 * Selftest entry point: verify that insertion and reservation honour the
 * color argument and the mm's color_adjust callback. Returns 0 on
 * success, negative errno on failure.
 */
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no color_adjust yet; color must simply be stored. */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		/* Use size == color == n so each node self-identifies. */
		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			/*
			 * Reserving flush against the previous (differently
			 * colored) node must be rejected by color_adjust.
			 */
			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* Skip past the gap and align to the node's size. */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Now insert (rather than reserve) colored nodes. */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify colors, separation gaps and alignment; then empty the mm. */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
1859
/*
 * Like evict_something(), but with a color constraint: scan the range for
 * a hole of @size/@alignment/@color, evict the victims, insert a temporary
 * node and verify its placement respects both the range and the
 * color_adjust separation, then restore the mm. Returns 0 or -errno.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->create_flags);
	if (!evict_nodes(&scan,
			 nodes, order, count,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must accept the colored request. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 mode->search_flags,
					 mode->create_flags);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* color_adjust must have kept a gap to differently colored neighbours. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Restore the evicted nodes to their original positions. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	return 0;
}
1928
/*
 * Selftest entry point: exercise colored eviction over the whole address
 * space. Returns 0 on success, negative errno on failure.
 */
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/*
	 * Size the mm for total_size unit nodes plus the one-unit
	 * separation gaps that color_adjust forces between them.
	 */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		/* Give every node a unique color so all neighbours differ. */
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed size (half the mm), power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with sizes scaled to still fit. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}
	}

	ret = 0;
out:
	if (ret)
		drm_mm_debug_table(&mm, __func__);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2026
Chris Wilsond1bac3a2016-12-22 08:36:22 +00002027static int igt_color_evict_range(void *ignored)
2028{
2029 DRM_RND_STATE(prng, random_seed);
2030 const unsigned int total_size = 8192;
2031 const unsigned int range_size = total_size / 2;
2032 const unsigned int range_start = total_size / 4;
2033 const unsigned int range_end = range_start + range_size;
2034 const struct insert_mode *mode;
2035 unsigned long color = 0;
2036 struct drm_mm mm;
2037 struct evict_node *nodes;
2038 struct drm_mm_node *node, *next;
2039 unsigned int *order, n;
2040 int ret, err;
2041
2042 /* Like igt_color_evict(), but limited to small portion of the full
2043 * drm_mm range.
2044 */
2045
2046 ret = -ENOMEM;
2047 nodes = vzalloc(total_size * sizeof(*nodes));
2048 if (!nodes)
2049 goto err;
2050
2051 order = drm_random_order(total_size, &prng);
2052 if (!order)
2053 goto err_nodes;
2054
2055 ret = -EINVAL;
2056 drm_mm_init(&mm, 0, 2*total_size - 1);
2057 mm.color_adjust = separate_adjacent_colors;
2058 for (n = 0; n < total_size; n++) {
2059 if (!expect_insert(&mm, &nodes[n].node,
2060 1, 0, color++,
2061 &insert_modes[0])) {
2062 pr_err("insert failed, step %d\n", n);
2063 goto out;
2064 }
2065 }
2066
2067 for (mode = evict_modes; mode->name; mode++) {
2068 for (n = 1; n <= range_size; n <<= 1) {
2069 drm_random_reorder(order, range_size, &prng);
2070 err = evict_color(&mm, range_start, range_end,
2071 nodes, order, total_size,
2072 n, 1, color++,
2073 mode);
2074 if (err) {
2075 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2076 mode->name, n, range_start, range_end);
2077 goto out;
2078 }
2079 }
2080
2081 for (n = 1; n < range_size; n <<= 1) {
2082 drm_random_reorder(order, total_size, &prng);
2083 err = evict_color(&mm, range_start, range_end,
2084 nodes, order, total_size,
2085 range_size/2, n, color++,
2086 mode);
2087 if (err) {
2088 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2089 mode->name, total_size/2, n, range_start, range_end);
2090 goto out;
2091 }
2092 }
2093
2094 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2095 unsigned int nsize = (range_size - n + 1) / 2;
2096
2097 DRM_MM_BUG_ON(!nsize);
2098
2099 drm_random_reorder(order, total_size, &prng);
2100 err = evict_color(&mm, range_start, range_end,
2101 nodes, order, total_size,
2102 nsize, n, color++,
2103 mode);
2104 if (err) {
2105 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2106 mode->name, nsize, n, range_start, range_end);
2107 goto out;
2108 }
2109 }
2110 }
2111
2112 ret = 0;
2113out:
2114 if (ret)
2115 drm_mm_debug_table(&mm, __func__);
2116 drm_mm_for_each_node_safe(node, next, &mm)
2117 drm_mm_remove_node(node);
2118 drm_mm_takedown(&mm);
2119 kfree(order);
2120err_nodes:
2121 vfree(nodes);
2122err:
2123 return ret;
2124}
2125
Chris Wilson50f00332016-12-22 08:36:09 +00002126#include "drm_selftest.c"
2127
2128static int __init test_drm_mm_init(void)
2129{
2130 int err;
2131
2132 while (!random_seed)
2133 random_seed = get_random_int();
2134
2135 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2136 random_seed, max_iterations, max_prime);
2137 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2138
2139 return err > 0 ? 0 : err;
2140}
2141
/* Module exit: nothing to tear down, each selftest cleans up after itself. */
static void __exit test_drm_mm_exit(void)
{
}
2145
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Read-only (0400) parameters: set at module load, reported via pr_info. */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");