drm/ttm: make ttm reservation calls behave like reservation calls

This commit converts the source of the val_seq counter to
the ww_mutex api. The reservation objects are converted later,
because there is still a lockdep splat in nouveau that has to be
resolved first.
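
After this change the TTM execbuf helpers thread an explicit
ww_acquire_ctx "ticket" through the reserve/fence/backoff cycle.
A minimal sketch of the resulting calling convention (illustrative
only; "fence" stands in for the driver's sync object, e.g. ib.fence
in the radeon_uvd.c hunk below):

	struct ww_acquire_ctx ticket;
	struct list_head head;	/* list of struct ttm_validate_buffer */
	int r;

	INIT_LIST_HEAD(&head);
	/* ... add ttm_validate_buffer entries to head ... */

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	/* validate buffers, build and schedule work, set r on failure */

	if (r)
		/* unreserves everything and releases the ticket */
		ttm_eu_backoff_reservation(&ticket, &head);
	else
		/* fences all buffers, unreserves, releases the ticket */
		ttm_eu_fence_buffer_objects(&ticket, &head, fence);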

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a424949..7e3fef4 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -979,6 +979,7 @@
 	u32			cs_flags;
 	u32			ring;
 	s32			priority;
+	struct ww_acquire_ctx	ticket;
 };
 
 extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 4f6b22b..13a130f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -106,7 +106,7 @@
 		radeon_bo_list_add_object(&p->relocs[i].lobj,
 					  &p->validated);
 	}
-	return radeon_bo_list_validate(&p->validated, p->ring);
+	return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -314,15 +314,17 @@
  * If error is set than unvalidate buffer, otherwise just free memory
  * used by parsing context.
  **/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
 {
 	unsigned i;
 
 	if (!error) {
-		ttm_eu_fence_buffer_objects(&parser->validated,
+		ttm_eu_fence_buffer_objects(&parser->ticket,
+					    &parser->validated,
 					    parser->ib.fence);
-	} else {
-		ttm_eu_backoff_reservation(&parser->validated);
+	} else if (backoff) {
+		ttm_eu_backoff_reservation(&parser->ticket,
+					   &parser->validated);
 	}
 
 	if (parser->relocs != NULL) {
@@ -535,7 +537,7 @@
 	r = radeon_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		radeon_cs_parser_fini(&parser, r);
+		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
@@ -544,7 +546,7 @@
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
-		radeon_cs_parser_fini(&parser, r);
+		radeon_cs_parser_fini(&parser, r, false);
 		up_read(&rdev->exclusive_lock);
 		r = radeon_cs_handle_lockup(rdev, r);
 		return r;
@@ -563,7 +565,7 @@
 		goto out;
 	}
 out:
-	radeon_cs_parser_fini(&parser, r);
+	radeon_cs_parser_fini(&parser, r, true);
 	up_read(&rdev->exclusive_lock);
 	r = radeon_cs_handle_lockup(rdev, r);
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 07af5a9..71287bb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -349,14 +349,15 @@
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, int ring)
+int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+			    struct list_head *head, int ring)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
 	u32 domain;
 	int r;
 
-	r = ttm_eu_reserve_buffers(head);
+	r = ttm_eu_reserve_buffers(ticket, head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index e2cb80a..3e62a3a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,7 +128,8 @@
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, int ring);
+extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
+				   struct list_head *head, int ring);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index ce5a10c..41efcec 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -550,6 +550,7 @@
 			       struct radeon_fence **fence)
 {
 	struct ttm_validate_buffer tv;
+	struct ww_acquire_ctx ticket;
 	struct list_head head;
 	struct radeon_ib ib;
 	uint64_t addr;
@@ -561,7 +562,7 @@
 	INIT_LIST_HEAD(&head);
 	list_add(&tv.head, &head);
 
-	r = ttm_eu_reserve_buffers(&head);
+	r = ttm_eu_reserve_buffers(&ticket, &head);
 	if (r)
 		return r;
 
@@ -569,16 +570,12 @@
 	radeon_uvd_force_into_uvd_segment(bo);
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
+	if (r)
+		goto err;
 
 	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
+	if (r)
+		goto err;
 
 	addr = radeon_bo_gpu_offset(bo);
 	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
@@ -592,11 +589,9 @@
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
-	if (r) {
-		ttm_eu_backoff_reservation(&head);
-		return r;
-	}
-	ttm_eu_fence_buffer_objects(&head, ib.fence);
+	if (r)
+		goto err;
+	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
@@ -604,6 +599,10 @@
 	radeon_ib_free(rdev, &ib);
 	radeon_bo_unref(&bo);
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation(&ticket, &head);
+	return r;
 }
 
 /* multiple fence commands without any stream commands in between can