drm/msm/sde: Avoid queueing the same work to multiple workers
During vblank enable/disable, the same work item is queued to
different display thread workers depending on the CRTC. This
triggers kernel warnings because a single kthread work item
cannot be queued to more than one worker. Fix this by allocating
a separate work item for each vblank control request.
Change-Id: I39995bbbdf754d2cfb2a8e50d3354eccf77f7132
Signed-off-by: Jayant Shekhar <jshekhar@codeaurora.org>
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0f565d3..c564a09 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -201,62 +201,46 @@ u32 msm_readl(const void __iomem *addr)
return val;
}
-struct vblank_event {
- struct list_head node;
+struct vblank_work {
+ struct kthread_work work;
int crtc_id;
bool enable;
+ struct msm_drm_private *priv;
};
static void vblank_ctrl_worker(struct kthread_work *work)
{
- struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
- struct msm_vblank_ctrl, work);
- struct msm_drm_private *priv = container_of(vbl_ctrl,
- struct msm_drm_private, vblank_ctrl);
+ struct vblank_work *cur_work = container_of(work,
+ struct vblank_work, work);
+ struct msm_drm_private *priv = cur_work->priv;
struct msm_kms *kms = priv->kms;
- struct vblank_event *vbl_ev, *tmp;
- unsigned long flags;
- LIST_HEAD(tmp_head);
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- list_add_tail(&vbl_ev->node, &tmp_head);
- }
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ if (cur_work->enable)
+ kms->funcs->enable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
+ else
+ kms->funcs->disable_vblank(kms, priv->crtcs[cur_work->crtc_id]);
- list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
- if (vbl_ev->enable)
- kms->funcs->enable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
- else
- kms->funcs->disable_vblank(kms,
- priv->crtcs[vbl_ev->crtc_id]);
-
- kfree(vbl_ev);
- }
+ kfree(cur_work);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
int crtc_id, bool enable)
{
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev;
- unsigned long flags;
+ struct vblank_work *cur_work;
- vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
- if (!vbl_ev)
+ if (!priv || crtc_id >= priv->num_crtcs)
+ return -EINVAL;
+
+ cur_work = kzalloc(sizeof(*cur_work), GFP_ATOMIC);
+ if (!cur_work)
return -ENOMEM;
- vbl_ev->crtc_id = crtc_id;
- vbl_ev->enable = enable;
+ kthread_init_work(&cur_work->work, vblank_ctrl_worker);
+ cur_work->crtc_id = crtc_id;
+ cur_work->enable = enable;
+ cur_work->priv = priv;
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
- list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
- kthread_queue_work(&priv->disp_thread[crtc_id].worker,
- &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker, &cur_work->work);
return 0;
}
@@ -268,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
- struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
- struct vblank_event *vbl_ev, *tmp;
int i;
- /* We must cancel and cleanup any pending vblank enable/disable
- * work before drm_irq_uninstall() to avoid work re-enabling an
- * irq after uninstall has disabled it.
- */
- kthread_flush_work(&vbl_ctrl->work);
- list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
- list_del(&vbl_ev->node);
- kfree(vbl_ev);
- }
-
/* clean up display commit/event worker threads */
for (i = 0; i < priv->num_crtcs; i++) {
if (priv->disp_thread[i].thread) {
@@ -522,9 +494,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
INIT_LIST_HEAD(&priv->client_event_list);
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
- spin_lock_init(&priv->vblank_ctrl.lock);
ret = sde_power_resource_init(pdev, &priv->phandle);
if (ret) {