iommu: msm: Refactor use of remote spinlock
The remote spinlock was originally meant to synchronize SMMU access
between the GPU and the CPU. However, this remote spinlock now serves
additional use cases. This change decouples the spinlock from the GPU
and adds support for taking the spinlock only for the SMMUs that are
marked in the device tree.
CRs-fixed: 517873
Change-Id: Ic50992d0d1a102fbd05855e09e254e627f99ec33
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
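
For context, a minimal sketch (not part of this patch) of what the
flagged lock callbacks could look like on the implementation side. The
reduced ops struct, the "need_extra_lock" parameter name, the
"msm_iommu_remote_lock_enabled" state, and the
msm_iommu_remote_spin_lock()/unlock() helpers are assumptions used
purely for illustration; the real definitions live in the non-secure
MSM IOMMU driver.

/*
 * Sketch only -- not part of this patch.  Names and layout are
 * assumptions that illustrate the new flag semantics.
 */
#include <linux/mutex.h>

struct iommu_access_ops_sketch {
	void (*iommu_lock_acquire)(unsigned int need_extra_lock);
	void (*iommu_lock_release)(unsigned int need_extra_lock);
};

static DEFINE_MUTEX(msm_iommu_lock);		/* CPU-side lock */
static int msm_iommu_remote_lock_enabled;	/* set from the device tree */

/* Placeholders for the hardware remote spinlock (hypothetical names). */
static void msm_iommu_remote_spin_lock(void) { }
static void msm_iommu_remote_spin_unlock(void) { }

static void msm_iommu_lock_acquire(unsigned int need_extra_lock)
{
	mutex_lock(&msm_iommu_lock);

	/* Only SMMUs marked in the device tree take the remote spinlock. */
	if (need_extra_lock && msm_iommu_remote_lock_enabled)
		msm_iommu_remote_spin_lock();
}

static void msm_iommu_lock_release(unsigned int need_extra_lock)
{
	if (need_extra_lock && msm_iommu_remote_lock_enabled)
		msm_iommu_remote_spin_unlock();

	mutex_unlock(&msm_iommu_lock);
}

Under this assumed shape, the 0 passed throughout msm_iommu_sec.c below
means the secure contexts take only the CPU-side lock and never touch
the remote spinlock.
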
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 474efdf..5a1806e 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -194,7 +194,7 @@
struct msm_scm_fault_regs_dump *regs;
int tmp, ret = IRQ_HANDLED;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
BUG_ON(!pdev);
@@ -266,7 +266,7 @@
free_regs:
kfree(regs);
lock_release:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
return ret;
}
@@ -510,12 +510,12 @@
{
struct msm_iommu_priv *priv;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
priv = domain->priv;
domain->priv = NULL;
kfree(priv);
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
}
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -526,7 +526,7 @@
struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
priv = domain->priv;
if (!priv || !dev) {
@@ -581,12 +581,12 @@
ctx_drvdata->attached_domain = domain;
++iommu_drvdata->ctx_attach_count;
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
msm_iommu_attached(dev->parent);
return ret;
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
return ret;
}
@@ -598,7 +598,7 @@
msm_iommu_detached(dev->parent);
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
if (!dev)
goto fail;
@@ -614,7 +614,7 @@
BUG_ON(iommu_drvdata->ctx_attach_count == 0);
--iommu_drvdata->ctx_attach_count;
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
}
static int get_drvdata(struct iommu_domain *domain,
@@ -644,7 +644,7 @@
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret = 0;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
if (ret)
@@ -655,7 +655,7 @@
va, pa, len);
iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
return ret;
}
@@ -666,7 +666,7 @@
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret = -ENODEV;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
if (ret)
@@ -677,7 +677,7 @@
va, len);
iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
/* the IOMMU API requires us to return how many bytes were unmapped */
len = ret ? 0 : len;
@@ -692,7 +692,7 @@
struct msm_iommu_drvdata *iommu_drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
if (ret)
@@ -702,7 +702,7 @@
va, sg, len);
iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
return ret;
}
@@ -714,7 +714,7 @@
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret;
- iommu_access_ops->iommu_lock_acquire();
+ iommu_access_ops->iommu_lock_acquire(0);
ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
if (ret)
@@ -725,7 +725,7 @@
iommu_access_ops->iommu_clk_off(iommu_drvdata);
fail:
- iommu_access_ops->iommu_lock_release();
+ iommu_access_ops->iommu_lock_release(0);
return 0;
}