drm/msm: use upstream iommu

Downstream kernel IOMMU had a non-standard way of dealing with multiple
devices and multiple ports/contexts.  We don't need any of that on the
upstream kernel, so rip out the crazy.
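
Roughly, the msm_gpu.c and msm_iommu.c hunks below add up to the
standard per-device pattern (sketch only; error handling and the now
unused names/cnt plumbing omitted):

    struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
    struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, domain);  /* takes a struct device now */

    /* msm_iommu_attach() then boils down to: */
    ret = iommu_attach_device(domain, mmu->dev);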

Note that we have to move the pinning of the ringbuffer to after the
IOMMU is attached, since msm_gem_get_iova_locked() maps the buffer
through the now-attached domain.  No idea how that managed to work
properly on the downstream kernel.
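
For reference, the resulting order as it falls out of the msm_gpu.c
and adreno_gpu.c hunks (sketch, error paths trimmed):

    /* msm_gpu_init(): create the MMU for the platform device first */
    gpu->mmu = msm_iommu_new(&pdev->dev, iommu);

    /* adreno_hw_init(): pin the ringbuffer only once the domain is attached */
    ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
    if (ret)
        return ret;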

For now, I am leaving the IOMMU port name stuff in place, to simplify
things for folks trying to backport the latest drm/msm to device
kernels.  Once we no longer have to care about pre-DT kernels, we can
drop this and instead backport the upstream IOMMU driver.

Signed-off-by: Rob Clark <robdclark@gmail.com>
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f123889..c99c50d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
 config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
-	depends on MSM_IOMMU
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	select DRM_KMS_HELPER
 	select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd..76c1df7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
 
 	DBG("%s", gpu->name);
 
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	if (ret) {
+		gpu->rb_iova = 0;
+		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+		return ret;
+	}
+
 	/* Setup REG_CP_RB_CNTL: */
 	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
 			/* size is log2(quad-words): */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index a9579ce..733646c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -361,7 +361,7 @@
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			goto fail;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 33b826d..17175b5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -320,7 +320,7 @@
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c632219..915240b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,7 +606,7 @@
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(drm, iommu);
+		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
@@ -621,13 +621,6 @@
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
-	if (ret) {
-		gpu->rb_iova = 0;
-		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
-		goto fail;
-	}
-
 	bs_init(gpu);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad91..099af48 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
 {
-	struct drm_device *dev = mmu->dev;
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i, ret;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx)) {
-			dev_warn(dev->dev, "couldn't get %s context", names[i]);
-			continue;
-		}
-		ret = iommu_attach_device(iommu->domain, ctx);
-		if (ret) {
-			dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-			return ret;
-		}
-	}
-
-	return 0;
+	return iommu_attach_device(iommu->domain, mmu->dev);
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx))
-			continue;
-		iommu_detach_device(iommu->domain, ctx);
-	}
+	iommu_detach_device(iommu->domain, mmu->dev);
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@
 		.destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
 	struct msm_iommu *iommu;
 
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d1..7cd88d9 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@
 
 struct msm_mmu {
 	const struct msm_mmu_funcs *funcs;
-	struct drm_device *dev;
+	struct device *dev;
 };
 
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 		const struct msm_mmu_funcs *funcs)
 {
 	mmu->dev = dev;
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 #endif /* __MSM_MMU_H__ */