/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

#define RADEON_TEST_COPY_BLIT 1
#define RADEON_TEST_COPY_DMA 0


/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        uint64_t gtt_addr, vram_addr;
        unsigned n, size;
        int i, r, ring;

        switch (flag) {
        case RADEON_TEST_COPY_DMA:
                ring = radeon_copy_dma_ring_index(rdev);
                break;
        case RADEON_TEST_COPY_BLIT:
                ring = radeon_copy_blit_ring_index(rdev);
                break;
        default:
                DRM_ERROR("Unknown copy method\n");
                return;
        }

        size = 1024 * 1024;

        /* Number of tests =
         * (Total GTT - pinned GTT memory such as the IB pool,
         *  writeback page and ring buffers) / test size
         */
        n = rdev->mc.gtt_size - rdev->gart_pin_size;
        n /= size;

        gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
        if (!gtt_obj) {
                DRM_ERROR("Failed to allocate %d pointers\n", n);
                r = 1;
                goto out_cleanup;
        }

        r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             0, NULL, NULL, &vram_obj);
        if (r) {
                DRM_ERROR("Failed to create VRAM object\n");
                goto out_cleanup;
        }
        r = radeon_bo_reserve(vram_obj, false);
        if (unlikely(r != 0))
                goto out_unref;
        r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
        if (r) {
                DRM_ERROR("Failed to pin VRAM object\n");
                goto out_unres;
        }
        for (i = 0; i < n; i++) {
                void *gtt_map, *vram_map;
                void **gtt_start, **gtt_end;
                void **vram_start, **vram_end;
                struct radeon_fence *fence = NULL;

                r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
                                     gtt_obj + i);
                if (r) {
                        DRM_ERROR("Failed to create GTT object %d\n", i);
                        goto out_lclean;
                }

                r = radeon_bo_reserve(gtt_obj[i], false);
                if (unlikely(r != 0))
                        goto out_lclean_unref;
                r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
                if (r) {
                        DRM_ERROR("Failed to pin GTT object %d\n", i);
                        goto out_lclean_unres;
                }

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object %d\n", i);
                        goto out_lclean_unpin;
                }

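                /* Seed the GTT buffer with a recognizable pattern: each
                 * pointer-sized slot is filled with its own CPU address,
                 * so the copy can later be verified slot by slot.
                 */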
                for (gtt_start = gtt_map, gtt_end = gtt_map + size;
                     gtt_start < gtt_end;
                     gtt_start++)
                        *gtt_start = gtt_start;

                radeon_bo_kunmap(gtt_obj[i]);

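                /* Queue a GPU copy of the whole buffer from GTT to VRAM on
                 * the selected ring; the returned fence signals completion.
                 */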
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
                                                vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
                                                 vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        r = PTR_ERR(fence);
                        goto out_lclean_unpin;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
                        goto out_lclean_unpin;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(vram_obj, &vram_map);
                if (r) {
                        DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

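                /* Check the VRAM copy against the GTT pattern and, in the
                 * same pass, reseed VRAM with the VRAM slot addresses for
                 * the copy back to GTT.
                 */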
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     vram_start < vram_end;
                     gtt_start++, vram_start++) {
                        if (*vram_start != gtt_start) {
                                DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
                                          "expected 0x%p (GTT/VRAM offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *vram_start, gtt_start,
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)gtt_start - gtt_map),
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)gtt_start - gtt_map));
                                radeon_bo_kunmap(vram_obj);
                                goto out_lclean_unpin;
                        }
                        *vram_start = vram_start;
                }

                radeon_bo_kunmap(vram_obj);

                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
                                                vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
                                                 vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        r = PTR_ERR(fence);
                        goto out_lclean_unpin;
                }

                r = radeon_fence_wait(fence, false);
                if (r) {
                        DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
                        goto out_lclean_unpin;
                }

                radeon_fence_unref(&fence);

                r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
                if (r) {
                        DRM_ERROR("Failed to map GTT object after copy %d\n", i);
                        goto out_lclean_unpin;
                }

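                /* Verify that the pattern written to VRAM survived the copy
                 * back into the GTT object.
                 */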
                for (gtt_start = gtt_map, gtt_end = gtt_map + size,
                     vram_start = vram_map, vram_end = vram_map + size;
                     gtt_start < gtt_end;
                     gtt_start++, vram_start++) {
                        if (*gtt_start != vram_start) {
                                DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
                                          "expected 0x%p (VRAM/GTT offset "
                                          "0x%16llx/0x%16llx)\n",
                                          i, *gtt_start, vram_start,
                                          (unsigned long long)
                                          (vram_addr - rdev->mc.vram_start +
                                           (void*)vram_start - vram_map),
                                          (unsigned long long)
                                          (gtt_addr - rdev->mc.gtt_start +
                                           (void*)vram_start - vram_map));
                                radeon_bo_kunmap(gtt_obj[i]);
                                goto out_lclean_unpin;
                        }
                }

                radeon_bo_kunmap(gtt_obj[i]);

                DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
                         gtt_addr - rdev->mc.gtt_start);
                continue;

out_lclean_unpin:
                radeon_bo_unpin(gtt_obj[i]);
out_lclean_unres:
                radeon_bo_unreserve(gtt_obj[i]);
out_lclean_unref:
                radeon_bo_unref(&gtt_obj[i]);
out_lclean:
                for (--i; i >= 0; --i) {
                        radeon_bo_unpin(gtt_obj[i]);
                        radeon_bo_unreserve(gtt_obj[i]);
                        radeon_bo_unref(&gtt_obj[i]);
                }
                if (fence && !IS_ERR(fence))
                        radeon_fence_unref(&fence);
                break;
        }

        radeon_bo_unpin(vram_obj);
out_unres:
        radeon_bo_unreserve(vram_obj);
out_unref:
        radeon_bo_unref(&vram_obj);
out_cleanup:
        kfree(gtt_obj);
        if (r) {
                pr_warn("Error while testing BO move\n");
        }
}

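/* Run the BO move test once for each copy engine the ASIC provides. */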
void radeon_test_moves(struct radeon_device *rdev)
{
        if (rdev->asic->copy.dma)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
        if (rdev->asic->copy.blit)
                radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}

static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
                                             struct radeon_ring *ring,
                                             struct radeon_fence **fence)
{
        uint32_t handle = ring->idx ^ 0xdeafbeef;
        int r;

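        /* UVD and VCE rings are exercised with a dummy create/destroy
         * message pair rather than a bare fence; the fence is taken from
         * the destroy message.
         */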
        if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
                r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
                   ring->idx == TN_RING_TYPE_VCE2_INDEX) {
                r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
                if (r) {
                        DRM_ERROR("Failed to get dummy create msg\n");
                        return r;
                }

                r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
                if (r) {
                        DRM_ERROR("Failed to get dummy destroy msg\n");
                        return r;
                }

        } else {
                r = radeon_ring_lock(rdev, ring, 64);
                if (r) {
                        DRM_ERROR("Failed to lock ring %d\n", ring->idx);
                        return r;
                }
                r = radeon_fence_emit(rdev, fence, ring->idx);
                if (r) {
                        DRM_ERROR("Failed to emit fence\n");
                        radeon_ring_unlock_undo(rdev, ring);
                        return r;
                }
                radeon_ring_unlock_commit(rdev, ring, false);
        }
        return 0;
}

void radeon_test_ring_sync(struct radeon_device *rdev,
                           struct radeon_ring *ringA,
                           struct radeon_ring *ringB)
{
        struct radeon_fence *fence1 = NULL, *fence2 = NULL;
        struct radeon_semaphore *semaphore = NULL;
        int r;

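        /* Ring A is made to wait on a semaphore twice, each wait followed
         * by a fence. Ring B then signals the semaphore twice; after each
         * signal exactly one more of ring A's fences is expected to retire.
         */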
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fence1)) {
                DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence1, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

        mdelay(1000);

        if (radeon_fence_signaled(fence2)) {
                DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %p\n", ringB);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);

        r = radeon_fence_wait(fence2, false);
        if (r) {
388 DRM_ERROR("Failed to wait for sync fence 1\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fence1)
                radeon_fence_unref(&fence1);

        if (fence2)
                radeon_fence_unref(&fence2);

        if (r)
                pr_warn("Error while testing ring sync (%d)\n", r);
}

static void radeon_test_ring_sync2(struct radeon_device *rdev,
                                   struct radeon_ring *ringA,
                                   struct radeon_ring *ringB,
                                   struct radeon_ring *ringC)
{
        struct radeon_fence *fenceA = NULL, *fenceB = NULL;
        struct radeon_semaphore *semaphore = NULL;
        bool sigA, sigB;
        int i, r;

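        /* Rings A and B each wait on the same semaphore and then emit a
         * fence. Ring C signals the semaphore twice; after the first signal
         * exactly one of the two fences should retire, after the second the
         * remaining one.
         */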
        r = radeon_semaphore_create(rdev, &semaphore);
        if (r) {
                DRM_ERROR("Failed to create semaphore\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringA, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringA, false);

        r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
        if (r)
                goto out_cleanup;

        r = radeon_ring_lock(rdev, ringB, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
                goto out_cleanup;
        }
        radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringB, false);
        r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
        if (r)
                goto out_cleanup;

        mdelay(1000);

        if (radeon_fence_signaled(fenceA)) {
                DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }
        if (radeon_fence_signaled(fenceB)) {
                DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
                goto out_cleanup;
        }

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

        for (i = 0; i < 30; ++i) {
                mdelay(100);
                sigA = radeon_fence_signaled(fenceA);
                sigB = radeon_fence_signaled(fenceB);
                if (sigA || sigB)
                        break;
        }

        if (!sigA && !sigB) {
                DRM_ERROR("Neither fence A nor B has been signaled\n");
                goto out_cleanup;
        } else if (sigA && sigB) {
                DRM_ERROR("Both fence A and B have been signaled\n");
                goto out_cleanup;
        }

        DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');

        r = radeon_ring_lock(rdev, ringC, 64);
        if (r) {
                DRM_ERROR("Failed to lock ring C %p\n", ringC);
                goto out_cleanup;
        }
        radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
        radeon_ring_unlock_commit(rdev, ringC, false);

        mdelay(1000);

        r = radeon_fence_wait(fenceA, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence A\n");
                goto out_cleanup;
        }
        r = radeon_fence_wait(fenceB, false);
        if (r) {
                DRM_ERROR("Failed to wait for sync fence B\n");
                goto out_cleanup;
        }

out_cleanup:
        radeon_semaphore_free(rdev, &semaphore, NULL);

        if (fenceA)
                radeon_fence_unref(&fenceA);

        if (fenceB)
                radeon_fence_unref(&fenceB);

        if (r)
                pr_warn("Error while testing ring sync (%d)\n", r);
}

static bool radeon_test_sync_possible(struct radeon_ring *ringA,
                                      struct radeon_ring *ringB)
{
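        /* Syncing is not tested with VCE2 as ring A and VCE1 as ring B. */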
        if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
            ringB->idx == TN_RING_TYPE_VCE1_INDEX)
                return false;

        return true;
}

void radeon_test_syncing(struct radeon_device *rdev)
{
        int i, j, k;

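        /* Walk every pair and triple of rings that are ready and exercise
         * semaphore synchronisation between them in all orderings.
         */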
        for (i = 1; i < RADEON_NUM_RINGS; ++i) {
                struct radeon_ring *ringA = &rdev->ring[i];
                if (!ringA->ready)
                        continue;

                for (j = 0; j < i; ++j) {
                        struct radeon_ring *ringB = &rdev->ring[j];
                        if (!ringB->ready)
                                continue;

                        if (!radeon_test_sync_possible(ringA, ringB))
                                continue;

                        DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
                        radeon_test_ring_sync(rdev, ringA, ringB);

                        DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
                        radeon_test_ring_sync(rdev, ringB, ringA);

                        for (k = 0; k < j; ++k) {
                                struct radeon_ring *ringC = &rdev->ring[k];
                                if (!ringC->ready)
                                        continue;

                                if (!radeon_test_sync_possible(ringA, ringC))
                                        continue;

                                if (!radeon_test_sync_possible(ringB, ringC))
                                        continue;

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
                                radeon_test_ring_sync2(rdev, ringA, ringB, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
                                radeon_test_ring_sync2(rdev, ringA, ringC, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
                                radeon_test_ring_sync2(rdev, ringB, ringA, ringC);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
                                radeon_test_ring_sync2(rdev, ringB, ringC, ringA);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
                                radeon_test_ring_sync2(rdev, ringC, ringA, ringB);

                                DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
                                radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
                        }
                }
        }
}