iommu: msm: Refactor use of remote spinlock

The remote spinlock was originally meant to synchronize GPU and CPU access
to the SMMU. However, the spinlock now serves more use cases than just the
GPU. This change decouples the spinlock from the GPU and adds support for
taking the spinlock only for the SMMUs that request it in the device tree.
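
A minimal sketch of how the per-SMMU flag could be populated at probe time
is shown below. The "qcom,needs-remote-spinlock" property name and the
helper function are illustrative only; the actual device tree binding and
probe wiring are defined outside this patch.

	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* Illustrative probe-time setup; not part of this patch. */
	static void msm_iommu_parse_remote_lock(struct platform_device *pdev,
						struct msm_iommu_drvdata *drvdata)
	{
		/* Take the remote spinlock only if the DT node asks for it. */
		drvdata->needs_rem_spinlock =
			of_property_read_bool(pdev->dev.of_node,
					      "qcom,needs-remote-spinlock");
	}

Callers then forward the flag, e.g.
msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock), so SMMUs that
are not shared with a remote processor skip the busy-wait entirely.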

CRs-fixed: 517873
Change-Id: Ic50992d0d1a102fbd05855e09e254e627f99ec33
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
diff --git a/drivers/iommu/msm_iommu-v0.c b/drivers/iommu/msm_iommu-v0.c
index 56aa7b2..99d071b 100644
--- a/drivers/iommu/msm_iommu-v0.c
+++ b/drivers/iommu/msm_iommu-v0.c
@@ -91,8 +91,11 @@
 			sizeof(*msm_iommu_remote_lock.lock));
 }
 
-void msm_iommu_remote_p0_spin_lock(void)
+void msm_iommu_remote_p0_spin_lock(unsigned int need_lock)
 {
+	if (!need_lock)
+		return;
+
 	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 1;
 	msm_iommu_remote_lock.lock->turn = 1;
 
@@ -103,8 +106,11 @@
 		cpu_relax();
 }
 
-void msm_iommu_remote_p0_spin_unlock(void)
+void msm_iommu_remote_p0_spin_unlock(unsigned int need_lock)
 {
+	if (!need_lock)
+		return;
+
 	smp_mb();
 
 	msm_iommu_remote_lock.lock->flag[PROC_APPS] = 0;
@@ -200,15 +206,15 @@
 	return msm_iommu_lock_initialize();
 }
 
-static void _iommu_lock_acquire(void)
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
 {
 	msm_iommu_mutex_lock();
-	msm_iommu_remote_spin_lock();
+	msm_iommu_remote_spin_lock(need_extra_lock);
 }
 
-static void _iommu_lock_release(void)
+static void _iommu_lock_release(unsigned int need_extra_lock)
 {
-	msm_iommu_remote_spin_unlock();
+	msm_iommu_remote_spin_unlock(need_extra_lock);
 	msm_iommu_mutex_unlock();
 }
 
@@ -243,7 +249,7 @@
 		if (ret)
 			goto fail;
 
-		msm_iommu_remote_spin_lock();
+		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);
 
 		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
 					   ctx_drvdata->num);
@@ -252,7 +258,7 @@
 			   asid | (va & TLBIVA_VA));
 		mb();
 
-		msm_iommu_remote_spin_unlock();
+		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
 
 		__disable_clocks(iommu_drvdata);
 	}
@@ -280,7 +286,7 @@
 		if (ret)
 			goto fail;
 
-		msm_iommu_remote_spin_lock();
+		msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);
 
 		asid = GET_CONTEXTIDR_ASID(iommu_drvdata->base,
 					   ctx_drvdata->num);
@@ -288,7 +294,7 @@
 		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
 		mb();
 
-		msm_iommu_remote_spin_unlock();
+		msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
 
 		__disable_clocks(iommu_drvdata);
 	}
@@ -320,13 +326,16 @@
 	mb();
 }
 
-static void __program_context(void __iomem *base, void __iomem *glb_base,
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
 			      int ctx, int ncb, phys_addr_t pgtable,
 			      int redirect, int ttbr_split)
 {
+	void __iomem *base = iommu_drvdata->base;
+	void __iomem *glb_base = iommu_drvdata->glb_base;
 	unsigned int prrr, nmrr;
 	int i, j, found;
-	msm_iommu_remote_spin_lock();
+
+	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);
 
 	__reset_context(base, glb_base, ctx);
 
@@ -418,7 +427,7 @@
 	SET_M(base, ctx, 1);
 	mb();
 
-	msm_iommu_remote_spin_unlock();
+	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
 }
 
 static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
@@ -528,7 +537,7 @@
 	if (ret)
 		goto unlock;
 
-	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
+	__program_context(iommu_drvdata,
 			  ctx_drvdata->num, iommu_drvdata->ncb,
 			  __pa(priv->pt.fl_table), priv->pt.redirect,
 			  iommu_drvdata->ttbr_split);
@@ -579,7 +588,7 @@
 	if (ret)
 		goto unlock;
 
-	msm_iommu_remote_spin_lock();
+	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);
 
 	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
 		    GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
@@ -587,7 +596,7 @@
 	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
 			ctx_drvdata->num);
 
-	msm_iommu_remote_spin_unlock();
+	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
 
 	__disable_clocks(iommu_drvdata);
 
@@ -1248,7 +1257,7 @@
 	if (ret)
 		goto fail;
 
-	msm_iommu_remote_spin_lock();
+	msm_iommu_remote_spin_lock(iommu_drvdata->needs_rem_spinlock);
 
 	SET_V2PPR(base, ctx, va & V2Pxx_VA);
 
@@ -1264,7 +1273,7 @@
 	if (GET_FAULT(base, ctx))
 		ret = 0;
 
-	msm_iommu_remote_spin_unlock();
+	msm_iommu_remote_spin_unlock(iommu_drvdata->needs_rem_spinlock);
 
 	__disable_clocks(iommu_drvdata);
 fail:
@@ -1326,7 +1335,7 @@
 	if (ret)
 		goto fail;
 
-	msm_iommu_remote_spin_lock();
+	msm_iommu_remote_spin_lock(drvdata->needs_rem_spinlock);
 
 	fsr = GET_FSR(base, num);
 
@@ -1359,7 +1368,7 @@
 	} else
 		ret = IRQ_NONE;
 
-	msm_iommu_remote_spin_unlock();
+	msm_iommu_remote_spin_unlock(drvdata->needs_rem_spinlock);
 
 	__disable_clocks(drvdata);
 fail:
diff --git a/drivers/iommu/msm_iommu-v1.c b/drivers/iommu/msm_iommu-v1.c
index b9c4cae..c81aa0ac 100644
--- a/drivers/iommu/msm_iommu-v1.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -123,12 +123,12 @@
 	clk_disable_unprepare(drvdata->pclk);
 }
 
-static void _iommu_lock_acquire(void)
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
 {
 	mutex_lock(&msm_iommu_lock);
 }
 
-static void _iommu_lock_release(void)
+static void _iommu_lock_release(unsigned int need_extra_lock)
 {
 	mutex_unlock(&msm_iommu_lock);
 }
diff --git a/drivers/iommu/msm_iommu_perfmon-v0.c b/drivers/iommu/msm_iommu_perfmon-v0.c
index 1073623..b08a9ec 100644
--- a/drivers/iommu/msm_iommu_perfmon-v0.c
+++ b/drivers/iommu/msm_iommu_perfmon-v0.c
@@ -176,9 +176,9 @@
 		goto out;
 	}
 
-	iommu->ops->iommu_lock_acquire();
+	iommu->ops->iommu_lock_acquire(1);
 	iommu_pm_check_for_overflow(pmon);
-	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_lock_release(1);
 
 	mutex_unlock(&pmon->lock);
 
diff --git a/drivers/iommu/msm_iommu_perfmon-v1.c b/drivers/iommu/msm_iommu_perfmon-v1.c
index 7d6dd34..2b55184 100644
--- a/drivers/iommu/msm_iommu_perfmon-v1.c
+++ b/drivers/iommu/msm_iommu_perfmon-v1.c
@@ -151,9 +151,9 @@
 		goto out;
 	}
 
-	iommu->ops->iommu_lock_acquire();
+	iommu->ops->iommu_lock_acquire(0);
 	iommu_pm_check_for_overflow(pmon);
-	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_lock_release(0);
 
 	mutex_unlock(&pmon->lock);
 
diff --git a/drivers/iommu/msm_iommu_perfmon.c b/drivers/iommu/msm_iommu_perfmon.c
index 958c6ca..503d4ab 100644
--- a/drivers/iommu/msm_iommu_perfmon.c
+++ b/drivers/iommu/msm_iommu_perfmon.c
@@ -188,11 +188,11 @@
 
 	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
 		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
-			iommu->ops->iommu_lock_acquire();
+			iommu->ops->iommu_lock_acquire(1);
 			iommu->hw_ops->counter_disable(iommu, counter);
 			iommu->hw_ops->ovfl_int_disable(iommu, counter);
 			iommu->hw_ops->set_event_class(pmon, count_no, 0);
-			iommu->ops->iommu_lock_release();
+			iommu->ops->iommu_lock_release(1);
 		}
 		counter->overflow_count = 0;
 		counter->value = 0;
@@ -200,12 +200,12 @@
 		counter->overflow_count = 0;
 		counter->value = 0;
 		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
-			iommu->ops->iommu_lock_acquire();
+			iommu->ops->iommu_lock_acquire(1);
 			iommu->hw_ops->set_event_class(pmon, count_no,
 					event_class);
 			iommu->hw_ops->ovfl_int_enable(iommu, counter);
 			iommu->hw_ops->counter_enable(iommu, counter);
-			iommu->ops->iommu_lock_release();
+			iommu->ops->iommu_lock_release(1);
 		}
 	}
 }
@@ -261,9 +261,9 @@
 	iommu->ops->iommu_clk_on(iommu_drvdata);
 
 	/* Reset counters in HW */
-	iommu->ops->iommu_lock_acquire();
+	iommu->ops->iommu_lock_acquire(1);
 	iommu->hw_ops->reset_counters(&pmon->iommu);
-	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_lock_release(1);
 
 	/* Reset SW counters */
 	iommu_pm_reset_counts(pmon);
@@ -272,7 +272,7 @@
 
 	iommu_pm_set_all_counters(pmon);
 
-	iommu->ops->iommu_lock_acquire();
+	iommu->ops->iommu_lock_acquire(1);
 
 	/* enable all counter group */
 	for (i = 0; i < pmon->num_groups; ++i)
@@ -280,7 +280,7 @@
 
 	/* enable global counters */
 	iommu->hw_ops->enable_pm(iommu);
-	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_lock_release(1);
 
 	pr_info("%s: TLB performance monitoring turned ON\n",
 		pmon->iommu.iommu_name);
@@ -295,7 +295,7 @@
 
 	pmon->enabled = 0;
 
-	iommu->ops->iommu_lock_acquire();
+	iommu->ops->iommu_lock_acquire(1);
 
 	/* disable global counters */
 	iommu->hw_ops->disable_pm(iommu);
@@ -310,7 +310,7 @@
 	/* Update cached copy of counters before turning off power */
 	iommu_pm_read_all_counters(pmon);
 
-	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_lock_release(1);
 	iommu->ops->iommu_clk_off(iommu_drvdata);
 	iommu->ops->iommu_bus_vote(iommu_drvdata, 0);
 	iommu->ops->iommu_power_off(iommu_drvdata);
@@ -341,9 +341,9 @@
 	mutex_lock(&pmon->lock);
 
 	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
-		iommu->ops->iommu_lock_acquire();
+		iommu->ops->iommu_lock_acquire(1);
 		counter->value = iommu->hw_ops->read_counter(counter);
-		iommu->ops->iommu_lock_release();
+		iommu->ops->iommu_lock_release(1);
 	}
 	full_count = (unsigned long long) counter->value +
 		     ((unsigned long long)counter->overflow_count *
@@ -448,9 +448,9 @@
 		rv = kstrtoul(buf, 10, &cmd);
 		if (!rv && (cmd == 1)) {
 			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
-				iommu->ops->iommu_lock_acquire();
+				iommu->ops->iommu_lock_acquire(1);
 				iommu->hw_ops->reset_counters(&pmon->iommu);
-				iommu->ops->iommu_lock_release();
+				iommu->ops->iommu_lock_release(1);
 			}
 			iommu_pm_reset_counts(pmon);
 			pr_info("TLB performance counters reset\n");
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
index 474efdf..5a1806e 100644
--- a/drivers/iommu/msm_iommu_sec.c
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -194,7 +194,7 @@
 	struct msm_scm_fault_regs_dump *regs;
 	int tmp, ret = IRQ_HANDLED;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	BUG_ON(!pdev);
 
@@ -266,7 +266,7 @@
 free_regs:
 	kfree(regs);
 lock_release:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 	return ret;
 }
 
@@ -510,12 +510,12 @@
 {
 	struct msm_iommu_priv *priv;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 	priv = domain->priv;
 	domain->priv = NULL;
 
 	kfree(priv);
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 }
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -526,7 +526,7 @@
 	struct msm_iommu_ctx_drvdata *tmp_drvdata;
 	int ret = 0;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	priv = domain->priv;
 	if (!priv || !dev) {
@@ -581,12 +581,12 @@
 	ctx_drvdata->attached_domain = domain;
 	++iommu_drvdata->ctx_attach_count;
 
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 
 	msm_iommu_attached(dev->parent);
 	return ret;
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 	return ret;
 }
 
@@ -598,7 +598,7 @@
 
 	msm_iommu_detached(dev->parent);
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 	if (!dev)
 		goto fail;
 
@@ -614,7 +614,7 @@
 	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
 	--iommu_drvdata->ctx_attach_count;
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 }
 
 static int get_drvdata(struct iommu_domain *domain,
@@ -644,7 +644,7 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
 	if (ret)
@@ -655,7 +655,7 @@
 					va, pa, len);
 	iommu_access_ops->iommu_clk_off(iommu_drvdata);
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 	return ret;
 }
 
@@ -666,7 +666,7 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = -ENODEV;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
 	if (ret)
@@ -677,7 +677,7 @@
 					va, len);
 	iommu_access_ops->iommu_clk_off(iommu_drvdata);
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 
 	/* the IOMMU API requires us to return how many bytes were unmapped */
 	len = ret ? 0 : len;
@@ -692,7 +692,7 @@
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
 	if (ret)
@@ -702,7 +702,7 @@
 						va, sg, len);
 	iommu_access_ops->iommu_clk_off(iommu_drvdata);
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 	return ret;
 }
 
@@ -714,7 +714,7 @@
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
 
-	iommu_access_ops->iommu_lock_acquire();
+	iommu_access_ops->iommu_lock_acquire(0);
 
 	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
 	if (ret)
@@ -725,7 +725,7 @@
 	iommu_access_ops->iommu_clk_off(iommu_drvdata);
 
 fail:
-	iommu_access_ops->iommu_lock_release();
+	iommu_access_ops->iommu_lock_release(0);
 	return 0;
 }