drm/radeon: add the infrastructure for concurrent buffer access

This allows us to specify whether we want to sync to the shared
fences of a reservation object or not. radeon_copy() now takes the
reservation object directly and returns the copy fence (or an
ERR_PTR on failure) instead of returning an int and filling in a
fence pointer.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
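
The radeon_ttm.c hunk below relies on the new calling convention for
radeon_copy(): it takes the buffer's reservation object and returns
the copy fence directly, with errors reported as an ERR_PTR. As a
sketch, the prototype inferred from the call site; the real
declaration lives in radeon.h, is not part of this hunk, and the
parameter names here are only illustrative:

struct radeon_fence *radeon_copy(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct reservation_object *resv);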
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 62d1f4d..eca2ce6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -233,6 +233,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
 	struct radeon_fence *fence;
+	unsigned num_pages;
 	int r, ridx;
 
 	rdev = radeon_get_rdev(bo->bdev);
@@ -269,12 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
-	/* sync other rings */
-	fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
-	r = radeon_copy(rdev, old_start, new_start,
-			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
-			&fence);
-	/* FIXME: handle copy error */
+	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
+	/* radeon_copy() syncs to the fences in bo->resv before copying */
+	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+	if (IS_ERR(fence))
+		return PTR_ERR(fence);
+
 	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
 				      evict, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
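
What this hunk does not show is the sync infrastructure itself. Below
is a minimal sketch of the policy that choosing "shared or not" maps
to, assuming the 3.18-era fence/reservation_object API; the helper
name is invented for illustration, the true/false meaning of the flag
is an assumption, and the real driver syncs on the GPU with
semaphores where it can rather than blocking in fence_wait():

#include <linux/fence.h>
#include <linux/reservation.h>

/* Illustrative only: wait for the fences in a reservation object;
 * the caller must hold the reservation lock.  The exclusive fence
 * (the last writer) is always waited on, the shared fences (readers)
 * only when exclusive access is wanted. */
static int sync_to_resv_fences(struct reservation_object *resv, bool shared)
{
	struct reservation_object_list *flist;
	struct fence *f;
	unsigned i;
	int r = 0;

	/* always wait for the last write */
	f = reservation_object_get_excl(resv);
	if (f)
		r = fence_wait(f, true);

	/* readers don't conflict with other readers */
	flist = reservation_object_get_list(resv);
	if (shared || !flist || r)
		return r;

	/* writers must additionally wait for every reader */
	for (i = 0; i < flist->shared_count; i++) {
		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
		r = fence_wait(f, true);
		if (r)
			break;
	}
	return r;
}

Passing the reservation object down like this keeps the old behavior
available (wait for everything before the blit) while letting readers
of the same buffer run concurrently, which appears to be what the
title means by concurrent buffer access.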