drm/msm: introduce msm_fence_context
Better encapsulate the per-timeline state in a fence-context. For now
there is just a single fence-context, but eventually we'll also have one
per CRTC to enable fully explicit fencing.
Signed-off-by: Rob Clark <robdclark@gmail.com>
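For context, a minimal sketch of what the new per-timeline context might group
together, based purely on the fields this patch removes from msm_drm_private
(next_fence, completed_fence, the fence wait queue, and the deferred-callback
list). The struct layout, extra dev/name members, and field comments are
assumptions for illustration, not the actual msm_fence.h contents:

/* Hypothetical sketch -- fields beyond next_fence, completed_fence and
 * fence_cbs are assumptions inferred from the msm_drm_private members
 * removed below, not the real definition. */
struct msm_fence_context {
	struct drm_device *dev;
	const char *name;            /* e.g. "gpu" now, one per CRTC later */
	uint32_t next_fence;         /* last assigned fence number */
	uint32_t completed_fence;    /* last completed fence number */
	wait_queue_head_t event;     /* woken when completed_fence advances */
	struct list_head fence_cbs;  /* callbacks deferred until bo is inactive */
};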
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index af007ac..6c3f67b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -49,6 +49,7 @@
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_fence_context;
struct msm_fence_cb;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -101,9 +102,6 @@
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
- wait_queue_head_t fence_event;
-
struct msm_rd_state *rd;
struct msm_perf_state *perf;
@@ -112,9 +110,6 @@
struct workqueue_struct *wq;
- /* callbacks deferred until bo is inactive: */
- struct list_head fence_cbs;
-
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
@@ -194,8 +189,6 @@
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
- struct msm_fence_cb *cb);
void msm_gem_move_to_active(struct drm_gem_object *obj,
struct msm_gpu *gpu, bool write, uint32_t fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
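The msm_gem_queue_inactive_cb() declaration dropped above presumably moves
alongside the new context. A hedged sketch of what a per-context interface
could look like, assuming a new msm_fence.h; these prototypes are illustrative
assumptions based on the fields relocated in this patch, not confirmed by the
hunks shown:

/* Hypothetical per-context API sketch -- names and signatures are assumptions. */
struct msm_fence_context *msm_fence_context_alloc(struct drm_device *dev,
		const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);

/* queue a callback to run once 'fence' has completed on this timeline */
int msm_queue_fence_cb(struct msm_fence_context *fctx,
		struct msm_fence_cb *cb, uint32_t fence);

/* advance completed_fence and wake waiters / run deferred callbacks */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);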