drm/msm: fix potential deadlock in gpu init
Somewhere along the way, the firmware loader sprouted another lock
dependency, resulting in a possible deadlock scenario:

  &dev->struct_mutex --> &sb->s_type->i_mutex_key#2 --> &mm->mmap_sem

which is problematic vs. things like gem mmap, where mmap_sem is
already held when the fault path goes on to take struct_mutex.
So introduce a separate mutex to synchronize gpu init.
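Since struct_mutex is no longer held across the whole init path, the
_locked helper calls are switched to their unlocked counterparts,
which take struct_mutex themselves.  Roughly, the convention looks
like this (a simplified sketch, not the exact msm_gem.c code):

  /* caller must already hold dev->struct_mutex: */
  int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
      uint32_t *iova);

  /* unlocked variant wraps the locked one: */
  int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
  {
      int ret;

      mutex_lock(&obj->dev->struct_mutex);
      ret = msm_gem_get_iova_locked(obj, id, iova);
      mutex_unlock(&obj->dev->struct_mutex);
      return ret;
  }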
Signed-off-by: Rob Clark <robdclark@gmail.com>
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 76c1df7..655ce5b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -95,7 +95,7 @@
DBG("%s", gpu->name);
- ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
if (ret) {
gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
@@ -370,8 +370,10 @@
return ret;
}
+ mutex_lock(&drm->struct_mutex);
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->memptrs_bo)) {
ret = PTR_ERR(gpu->memptrs_bo);
gpu->memptrs_bo = NULL;
@@ -379,13 +381,13 @@
return ret;
}
- gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
if (!gpu->memptrs) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
- ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
&gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index a2f5bf6..b447c01 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@
struct msm_kms *kms;
int ret;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@
static void load_gpu(struct drm_device *dev)
{
+ static DEFINE_MUTEX(init_lock);
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu;
- if (priv->gpu)
- return;
+ mutex_lock(&init_lock);
- mutex_lock(&dev->struct_mutex);
+ if (priv->gpu)
+ goto out;
+
gpu = a3xx_gpu_init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@
if (gpu) {
int ret;
+ mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
+ mutex_unlock(&dev->struct_mutex);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
-
}
priv->gpu = gpu;
- mutex_unlock(&dev->struct_mutex);
+out:
+ mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 915240b..4a0dce5 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -612,8 +612,11 @@
}
gpu->id = msm_register_mmu(drm, gpu->mmu);
+
/* Create ringbuffer: */
+ mutex_lock(&drm->struct_mutex);
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->rb)) {
ret = PTR_ERR(gpu->rb);
gpu->rb = NULL;