iommu: arm-smmu: Don't call iommu_unmap() with zero size

If guard pages are disabled, min_iova_align is zero and no guard
region exists, so skip the iommu_unmap() call for it rather than
issuing a zero-size unmap. Additionally, ensure the iova passed to
__fast_smmu_free_iova() is page aligned by subtracting the sub-page
offset, so the free covers the same range that was originally
allocated.

Change-Id: I26cb6b354deb8a15458e5b7bf704c9c77d1c24ec
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
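---
Worked example for the __fast_smmu_free_iova() change (hypothetical
values, 4K pages): unmapping dma_addr 0x10000804 with size 0x100 gives
offset = 0x804 and len = 0x1000 after page alignment. The iova bitmap
entry was allocated at the page-aligned base 0x10000000, so the free
must pass iova - offset (0x10000000), not the offset iova (0x10000804);
freeing at the unaligned address would release the wrong bitmap range.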
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6d1e618..0522f50 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1224,12 +1224,12 @@
 
 	addr = addr & PAGE_MASK;
 	size = PAGE_ALIGN(size);
-	if (mapping->min_iova_align)
+	if (mapping->min_iova_align) {
 		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-	else
+		iommu_unmap(mapping->domain, addr + size, guard_len);
+	} else {
 		guard_len = 0;
-
-	iommu_unmap(mapping->domain, addr + size, guard_len);
+	}
 
 	start = (addr - mapping->base) >> PAGE_SHIFT;
 	count = (size + guard_len) >> PAGE_SHIFT;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 11666fa..57ae0dd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -290,11 +290,12 @@
 	unsigned long shift = iova_shift(iovad);
 	unsigned long guard_len;
 
-	if (cookie->min_iova_align)
+	if (cookie->min_iova_align) {
 		guard_len = ALIGN(size, cookie->min_iova_align) - size;
-	else
+		iommu_unmap(domain, iova + size, guard_len);
+	} else {
 		guard_len = 0;
-	iommu_unmap(domain, iova + size, guard_len);
+	}
 
 	free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
 }
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 2d71a91..ad7ee11 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -311,13 +311,13 @@
 	unsigned long nbits;
 	unsigned long guard_len;
 
-	if (mapping->min_iova_align)
+	if (mapping->min_iova_align) {
 		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-	else
+		iommu_unmap(mapping->domain, iova + size, guard_len);
+	} else {
 		guard_len = 0;
+	}
 	nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
 
-	iommu_unmap(mapping->domain, iova + size, guard_len);
-
 	/*
 	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
@@ -436,7 +436,7 @@
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(pmd, len);
 	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
-	__fast_smmu_free_iova(mapping, iova, len);
+	__fast_smmu_free_iova(mapping, iova - offset, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
 	trace_unmap(mapping->domain, iova - offset, len, len);
@@ -744,7 +744,7 @@
 
 	iommu_unmap(mapping->domain, addr - offset, len);
 	spin_lock_irqsave(&mapping->lock, flags);
-	__fast_smmu_free_iova(mapping, addr, len);
+	__fast_smmu_free_iova(mapping, addr - offset, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }