drm/nouveau/platform: probe IOMMU if present

Tegra SoCs have an IOMMU that can be used to present non-contiguous
physical memory as contiguous to the GPU and maximize the use of large
pages in the GPU MMU, leading to performance gains. This patch adds
support for probing such an IOMMU if present and makes its properties
available in the nouveau_platform_gpu structure so that subsystems can
take advantage of it.
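
For illustration only (not part of this change), a client could consume these
fields roughly as sketched below: reserve a contiguous IOVA range from
gpu->iommu.mm, then back it with discontiguous system pages through the
domain. The helper name is made up, error handling is minimal, and it assumes
the IOMMU page size selected at probe time equals PAGE_SIZE:

/* illustrative sketch, assuming gpu->iommu.pgshift == PAGE_SHIFT */
static int example_map_pages(struct nouveau_platform_gpu *gpu,
			     struct page **pages, unsigned int npages,
			     struct nvkm_mm_node **pnode)
{
	unsigned int i;
	int ret;

	/* reserve a contiguous IOVA range, counted in IOMMU pages */
	mutex_lock(&gpu->iommu.mutex);
	ret = nvkm_mm_head(gpu->iommu.mm, 0, 1, npages, npages, 1, pnode);
	mutex_unlock(&gpu->iommu.mutex);
	if (ret)
		return ret;

	/* map each system page at its slot within the reserved range */
	for (i = 0; i < npages; i++) {
		unsigned long iova =
			(unsigned long)((*pnode)->offset + i) << gpu->iommu.pgshift;

		ret = iommu_map(gpu->iommu.domain, iova, page_to_phys(pages[i]),
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto unmap;
	}

	return 0;

unmap:
	while (i--)
		iommu_unmap(gpu->iommu.domain,
			    (unsigned long)((*pnode)->offset + i) << gpu->iommu.pgshift,
			    PAGE_SIZE);
	mutex_lock(&gpu->iommu.mutex);
	nvkm_mm_free(gpu->iommu.mm, pnode);
	mutex_unlock(&gpu->iommu.mutex);
	return ret;
}

The GPU MMU can then map such a contiguous range using large pages.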

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index dc5900b..3691982 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
 #include <linux/of.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
+#include <linux/iommu.h>
 #include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
 
@@ -91,6 +92,71 @@
 	return 0;
 }
 
+static void nouveau_platform_probe_iommu(struct device *dev,
+					 struct nouveau_platform_gpu *gpu)
+{
+	int err;
+	unsigned long pgsize_bitmap;
+
+	mutex_init(&gpu->iommu.mutex);
+
+	if (iommu_present(&platform_bus_type)) {
+		gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
+		if (!gpu->iommu.domain)
+			goto error;
+
+		/*
+		 * An IOMMU is only usable if it supports page sizes smaller
+		 * than or equal to the system's PAGE_SIZE, with a preference
+		 * for PAGE_SIZE itself when the IOMMU supports it.
+		 */
+		pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
+		if (pgsize_bitmap & PAGE_SIZE) {
+			gpu->iommu.pgshift = PAGE_SHIFT;
+		} else {
+			gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
+			if (gpu->iommu.pgshift == 0) {
+				dev_warn(dev, "unsupported IOMMU page size\n");
+				goto free_domain;
+			}
+			gpu->iommu.pgshift -= 1;
+		}
+
+		err = iommu_attach_device(gpu->iommu.domain, dev);
+		if (err)
+			goto free_domain;
+		/* manage a 40-bit IOVA space, expressed in IOMMU pages */
+		err = nvkm_mm_init(&gpu->iommu._mm, 0,
+				   (1ULL << 40) >> gpu->iommu.pgshift, 1);
+		if (err)
+			goto detach_device;
+
+		gpu->iommu.mm = &gpu->iommu._mm;
+	}
+
+	return;
+
+detach_device:
+	iommu_detach_device(gpu->iommu.domain, dev);
+
+free_domain:
+	iommu_domain_free(gpu->iommu.domain);
+
+error:
+	gpu->iommu.domain = NULL;
+	gpu->iommu.pgshift = 0;
+	dev_err(dev, "cannot initialize IOMMU\n");
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+					  struct nouveau_platform_gpu *gpu)
+{
+	if (gpu->iommu.domain) {
+		iommu_detach_device(gpu->iommu.domain, dev);
+		iommu_domain_free(gpu->iommu.domain);
+	}
+}
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
 	struct nouveau_platform_gpu *gpu;
@@ -118,6 +184,8 @@
 	if (IS_ERR(gpu->clk_pwr))
 		return PTR_ERR(gpu->clk_pwr);
 
+	nouveau_platform_probe_iommu(&pdev->dev, gpu);
+
 	err = nouveau_platform_power_up(gpu);
 	if (err)
 		return err;
@@ -154,10 +222,15 @@
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
 	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
+	int err;
 
 	nouveau_drm_device_remove(drm_dev);
 
-	return nouveau_platform_power_down(gpu);
+	err = nouveau_platform_power_down(gpu);
+
+	nouveau_platform_remove_iommu(&pdev->dev, gpu);
+
+	return err;
 }
 
 #if IS_ENABLED(CONFIG_OF)