msm: kgsl: Remove active count get/put for secure context mappings

There is no longer a need to force the GPU on to do MMU map/unmap
operations for secure contexts. Voting the GPU on was only done to
force the MMU driver to be powered up. Now that the MMU driver votes
for its own resources, there is no need to keep the MMU on indirectly
by turning on the GPU. Remove this dead code.

Change-Id: Ib8fa38b1047ad6a12342a7f0b7bb3bd688b13c5c
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
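---
Note for reviewers: the removal is safe because the MMU driver now
takes its own power vote around page table operations, so KGSL no
longer needs to hold an active count on the GPU just to keep the MMU
clocked. A minimal sketch of that pattern, assuming a runtime-PM
based implementation (struct example_mmu and its fields are
hypothetical, illustrative only, not the actual arm-smmu code):

#include <linux/iommu.h>
#include <linux/pm_runtime.h>

struct example_mmu {
	struct device *dev;
	struct iommu_domain *domain;
};

static int example_mmu_map(struct example_mmu *mmu, unsigned long iova,
		phys_addr_t paddr, size_t size, int prot)
{
	int ret;

	/* The MMU driver votes for its own power before touching HW */
	ret = pm_runtime_get_sync(mmu->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(mmu->dev);
		return ret;
	}

	ret = iommu_map(mmu->domain, iova, paddr, size, prot);

	/* Drop the vote once the page tables are updated */
	pm_runtime_put_autosuspend(mmu->dev);
	return ret;
}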
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index b354ef2..0338c5fd 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -363,58 +363,19 @@
 	return ret;
 }
 
-static int _lock_if_secure_mmu(struct kgsl_memdesc *memdesc,
-		struct kgsl_mmu *mmu)
-{
-	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
-	if (!kgsl_memdesc_is_secured(memdesc))
-		return 0;
-
-	if (!kgsl_mmu_is_secured(mmu))
-		return -EINVAL;
-
-	mutex_lock(&device->mutex);
-	if (kgsl_active_count_get(device)) {
-		mutex_unlock(&device->mutex);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void _unlock_if_secure_mmu(struct kgsl_memdesc *memdesc,
-		struct kgsl_mmu *mmu)
-{
-	struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
-	if (!kgsl_memdesc_is_secured(memdesc) || !kgsl_mmu_is_secured(mmu))
-		return;
-
-	kgsl_active_count_put(device);
-	mutex_unlock(&device->mutex);
-}
-
 static int _iommu_map_sync_pc(struct kgsl_pagetable *pt,
-		struct kgsl_memdesc *memdesc,
 		uint64_t gpuaddr, phys_addr_t physaddr,
 		uint64_t size, unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	int ret;
 
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
-
 	_iommu_sync_mmu_pc(true);
 
 	ret = iommu_map(iommu_pt->domain, gpuaddr, physaddr, size, flags);
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (ret) {
 		KGSL_CORE_ERR("map err: 0x%016llX, 0x%llx, 0x%x, %d\n",
 			gpuaddr, size, flags, ret);
@@ -425,15 +386,10 @@
 }
 
 static int _iommu_unmap_sync_pc(struct kgsl_pagetable *pt,
-		struct kgsl_memdesc *memdesc, uint64_t addr, uint64_t size)
+		uint64_t addr, uint64_t size)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	size_t unmapped = 0;
-	int ret;
-
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
 
 	_iommu_sync_mmu_pc(true);
 
@@ -441,8 +397,6 @@
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (unmapped != size) {
 		KGSL_CORE_ERR("unmap err: 0x%016llx, 0x%llx, %zd\n",
 			addr, size, unmapped);
@@ -453,8 +407,7 @@
 }
 
 static int _iommu_map_sg_offset_sync_pc(struct kgsl_pagetable *pt,
-		uint64_t addr, struct kgsl_memdesc *memdesc,
-		struct scatterlist *sg, int nents,
+		uint64_t addr, struct scatterlist *sg, int nents,
 		uint64_t offset, uint64_t size, unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
@@ -466,10 +419,6 @@
 	phys_addr_t physaddr;
 	int ret;
 
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
-
 	_iommu_sync_mmu_pc(true);
 
 	for_each_sg(sg, s, nents, i) {
@@ -509,11 +458,9 @@
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (size != 0) {
 		/* Cleanup on error */
-		_iommu_unmap_sync_pc(pt, memdesc, addr, mapped);
+		_iommu_unmap_sync_pc(pt, addr, mapped);
 		KGSL_CORE_ERR(
 			"map sg offset err: 0x%016llX, %d, %x, %zd\n",
 			addr, nents, flags, mapped);
@@ -524,17 +471,11 @@
 }
 
 static int _iommu_map_sg_sync_pc(struct kgsl_pagetable *pt,
-		uint64_t addr, struct kgsl_memdesc *memdesc,
-		struct scatterlist *sg, int nents,
+		uint64_t addr, struct scatterlist *sg, int nents,
 		unsigned int flags)
 {
 	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	size_t mapped;
-	int ret;
-
-	ret = _lock_if_secure_mmu(memdesc, pt->mmu);
-	if (ret)
-		return ret;
 
 	_iommu_sync_mmu_pc(true);
 
@@ -542,8 +483,6 @@
 
 	_iommu_sync_mmu_pc(false);
 
-	_unlock_if_secure_mmu(memdesc, pt->mmu);
-
 	if (mapped == 0) {
 		KGSL_CORE_ERR("map sg err: 0x%016llX, %d, %x, %zd\n",
 			addr, nents, flags, mapped);
@@ -1754,7 +1693,7 @@
 	if (addr == 0)
 		return -EINVAL;
 
-	return _iommu_unmap_sync_pc(pt, memdesc, addr + offset, size);
+	return _iommu_unmap_sync_pc(pt, addr + offset, size);
 }
 
 static int
@@ -1819,7 +1758,7 @@
 		physaddr = page_to_phys(kgsl_guard_page);
 	}
 
-	return _iommu_map_sync_pc(pt, memdesc, gpuaddr, physaddr,
+	return _iommu_map_sync_pc(pt, gpuaddr, physaddr,
 			kgsl_memdesc_guard_page_size(memdesc),
 			protflags & ~IOMMU_WRITE);
 }
@@ -1864,14 +1803,13 @@
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);
 
-	ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
-				sgt->nents, flags);
+	ret = _iommu_map_sg_sync_pc(pt, addr, sgt->sgl, sgt->nents, flags);
 	if (ret)
 		goto done;
 
 	ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
 	if (ret)
-		_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+		_iommu_unmap_sync_pc(pt, addr, size);
 
 done:
 	if (memdesc->pages != NULL)
@@ -1910,8 +1848,7 @@
 			0, size, GFP_KERNEL);
 	if (ret == 0) {
 		ret = _iommu_map_sg_sync_pc(pt, memdesc->gpuaddr + offset,
-				memdesc, sgt.sgl, sgt.nents,
-				IOMMU_READ | IOMMU_NOEXEC);
+				sgt.sgl, sgt.nents, IOMMU_READ | IOMMU_NOEXEC);
 		sg_free_table(&sgt);
 	}
 
@@ -1964,7 +1901,7 @@
 	ret = sg_alloc_table_from_pages(&sgt, pages, count,
 			0, size, GFP_KERNEL);
 	if (ret == 0) {
-		ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt.sgl,
+		ret = _iommu_map_sg_sync_pc(pt, addr, sgt.sgl,
 				sgt.nents, map_flags);
 		sg_free_table(&sgt);
 	}
@@ -2013,7 +1950,7 @@
 				memdesc, physoffset, size, protflags);
 	else
 		ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
-				memdesc, sgt->sgl, sgt->nents,
+				sgt->sgl, sgt->nents,
 				physoffset, size, protflags);
 
 	if (memdesc->pages != NULL)