/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
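
/*
 * Sanity tests for amdgpu: exercise the GPU copy path between GTT and
 * VRAM, and semaphore-based synchronization between rings.
 */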

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	uint64_t gtt_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}

	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
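
	/*
	 * Each pass pins one test-sized GTT BO at a fresh offset, fills it
	 * with a recognizable pattern (every pointer-sized slot stores its
	 * own CPU address), copies it to VRAM and back on the device's
	 * buffer-copy ring, and verifies the pattern in both directions.
	 */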
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gtt_start, **gtt_end;
		void **vram_start, **vram_end;
		struct amdgpu_fence *fence = NULL;

		r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size;
		     gtt_start < gtt_end;
		     gtt_start++)
			*gtt_start = gtt_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gtt_start++, vram_start++) {
			if (*vram_start != gtt_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gtt_start,
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void *)gtt_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void *)gtt_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);
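
		/*
		 * VRAM now holds its own mapped addresses; copy it back to
		 * the GTT BO and check the pattern a second time.
		 */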

		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
				       size, NULL, &fence);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = amdgpu_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		amdgpu_fence_unref(&fence);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gtt_start = gtt_map, gtt_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gtt_start < gtt_end;
		     gtt_start++, vram_start++) {
			if (*gtt_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gtt_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->mc.vram_start +
					   (void *)vram_start - vram_map),
					  (unsigned long long)
					  (gtt_addr - adev->mc.gtt_start +
					   (void *)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
			 gtt_addr - adev->mc.gtt_start);
		continue;

out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			amdgpu_fence_unref(&fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		printk(KERN_WARNING "Error while testing BO move.\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}
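
/*
 * Emit a test fence on @ring: UVD and VCE rings are exercised through a
 * dummy create/destroy message pair, returning the destroy message's
 * fence; all other rings emit a fence directly.
 */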

static int amdgpu_test_create_and_emit_fence(struct amdgpu_device *adev,
					     struct amdgpu_ring *ring,
					     struct amdgpu_fence **fence)
{
	uint32_t handle = ring->idx ^ 0xdeafbeef;
	int r;

	if (ring == &adev->uvd.ring) {
		struct fence *f = NULL;

		r = amdgpu_uvd_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_uvd_get_destroy_msg(ring, handle, &f);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}
		*fence = to_amdgpu_fence(f);

	} else if (ring == &adev->vce.ring[0] ||
		   ring == &adev->vce.ring[1]) {
		r = amdgpu_vce_get_create_msg(ring, handle, NULL);
		if (r) {
			DRM_ERROR("Failed to get dummy create msg\n");
			return r;
		}

		r = amdgpu_vce_get_destroy_msg(ring, handle, fence);
		if (r) {
			DRM_ERROR("Failed to get dummy destroy msg\n");
			return r;
		}

	} else {
		r = amdgpu_ring_lock(ring, 64);
		if (r) {
			DRM_ERROR("Failed to lock ring %d\n", ring->idx);
			return r;
		}
		amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
		amdgpu_ring_unlock_commit(ring);
	}
	return 0;
}
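
/*
 * Two-ring semaphore test: ringA queues two semaphore waits, each
 * followed by a fence. Neither fence may signal on its own; each must
 * complete only after ringB signals the semaphore once.
 */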

void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *ringA,
			   struct amdgpu_ring *ringB)
{
	struct amdgpu_fence *fence1 = NULL, *fence2 = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	int r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence1);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fence2);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fence1)) {
		DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence1, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 1\n");
		goto out_cleanup;
	}

	mdelay(1000);

	if (amdgpu_fence_signaled(fence2)) {
		DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %p\n", ringB);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);

	r = amdgpu_fence_wait(fence2, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence 2\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fence1)
		amdgpu_fence_unref(&fence1);

	if (fence2)
		amdgpu_fence_unref(&fence2);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
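
/*
 * Three-ring semaphore test: ringA and ringB each wait on the same
 * semaphore, with a fence queued behind each wait. ringC then signals
 * the semaphore twice; exactly one of the two fences should complete
 * per signal, in either order.
 */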

static void amdgpu_test_ring_sync2(struct amdgpu_device *adev,
				   struct amdgpu_ring *ringA,
				   struct amdgpu_ring *ringB,
				   struct amdgpu_ring *ringC)
{
	struct amdgpu_fence *fenceA = NULL, *fenceB = NULL;
	struct amdgpu_semaphore *semaphore = NULL;
	bool sigA, sigB;
	int i, r;

	r = amdgpu_semaphore_create(adev, &semaphore);
	if (r) {
		DRM_ERROR("Failed to create semaphore\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringA, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringA, semaphore);
	amdgpu_ring_unlock_commit(ringA);

	r = amdgpu_test_create_and_emit_fence(adev, ringA, &fenceA);
	if (r)
		goto out_cleanup;

	r = amdgpu_ring_lock(ringB, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_wait(ringB, semaphore);
	amdgpu_ring_unlock_commit(ringB);
	r = amdgpu_test_create_and_emit_fence(adev, ringB, &fenceB);
	if (r)
		goto out_cleanup;

	mdelay(1000);

	if (amdgpu_fence_signaled(fenceA)) {
		DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}
	if (amdgpu_fence_signaled(fenceB)) {
		DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
		goto out_cleanup;
	}

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	for (i = 0; i < 30; ++i) {
		mdelay(100);
		sigA = amdgpu_fence_signaled(fenceA);
		sigB = amdgpu_fence_signaled(fenceB);
		if (sigA || sigB)
			break;
	}

	if (!sigA && !sigB) {
		DRM_ERROR("Neither fence A nor B has been signaled\n");
		goto out_cleanup;
	} else if (sigA && sigB) {
		DRM_ERROR("Both fences A and B have been signaled\n");
		goto out_cleanup;
	}

	DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

	r = amdgpu_ring_lock(ringC, 64);
	if (r) {
		DRM_ERROR("Failed to lock ring C %p\n", ringC);
		goto out_cleanup;
	}
	amdgpu_semaphore_emit_signal(ringC, semaphore);
	amdgpu_ring_unlock_commit(ringC);

	mdelay(1000);

	r = amdgpu_fence_wait(fenceA, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence A\n");
		goto out_cleanup;
	}
	r = amdgpu_fence_wait(fenceB, false);
	if (r) {
		DRM_ERROR("Failed to wait for sync fence B\n");
		goto out_cleanup;
	}

out_cleanup:
	amdgpu_semaphore_free(adev, &semaphore, NULL);

	if (fenceA)
		amdgpu_fence_unref(&fenceA);

	if (fenceB)
		amdgpu_fence_unref(&fenceB);

	if (r)
		printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
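
/*
 * The two VCE rings are not synchronized against each other here, so
 * skip that pairing.
 */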

static bool amdgpu_test_sync_possible(struct amdgpu_ring *ringA,
				      struct amdgpu_ring *ringB)
{
	if (ringA == &ringA->adev->vce.ring[0] &&
	    ringB == &ringB->adev->vce.ring[1])
		return false;

	return true;
}
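
/*
 * Run the semaphore tests on every pair, and every ordered triple, of
 * rings that are ready and eligible for syncing.
 */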

void amdgpu_test_syncing(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 1; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ringA = adev->rings[i];
		if (!ringA || !ringA->ready)
			continue;

		for (j = 0; j < i; ++j) {
			struct amdgpu_ring *ringB = adev->rings[j];
			if (!ringB || !ringB->ready)
				continue;

			if (!amdgpu_test_sync_possible(ringA, ringB))
				continue;

			DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
			amdgpu_test_ring_sync(adev, ringA, ringB);

			DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
			amdgpu_test_ring_sync(adev, ringB, ringA);

			for (k = 0; k < j; ++k) {
				struct amdgpu_ring *ringC = adev->rings[k];
				if (!ringC || !ringC->ready)
					continue;

				if (!amdgpu_test_sync_possible(ringA, ringC))
					continue;

				if (!amdgpu_test_sync_possible(ringB, ringC))
					continue;

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
				amdgpu_test_ring_sync2(adev, ringA, ringB, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
				amdgpu_test_ring_sync2(adev, ringA, ringC, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
				amdgpu_test_ring_sync2(adev, ringB, ringA, ringC);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
				amdgpu_test_ring_sync2(adev, ringB, ringC, ringA);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
				amdgpu_test_ring_sync2(adev, ringC, ringA, ringB);

				DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
				amdgpu_test_ring_sync2(adev, ringC, ringB, ringA);
			}
		}
	}
}