iommu: dma-mapping-fast: Only flush non-coherent page tables

Some SMMUs have coherent page table walkers that can read page tables
directly out of the CPU caches, so no cache maintenance is needed on
the page table memory.  Skip the cache maintenance when the SMMU
reports coherent page tables.
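
In outline (a condensed sketch stitched together from the hunks below,
not standalone code; names are those used in the patch):

    /* Gate page table cache maintenance on walker coherency. */
    static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
                                      void *start, void *end)
    {
        /* A coherent walker snoops the CPU caches; skip the clean. */
        if (!mapping->is_smmu_pt_coherent)
            dmac_clean_range(start, end);
    }

    /* At attach time, cache the coherency reported by the domain. */
    if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
                              &mapping->fast->is_smmu_pt_coherent))
        return -EINVAL;

All map and unmap paths then call fast_dmac_clean_range() instead of
calling dmac_clean_range() on the PTEs directly.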

Change-Id: I47e00e72548212209b59c13a0d7d59cb0ab964ff
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index f7ed246..b543527 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -25,6 +25,13 @@
 #define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
 #define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
 
+static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
+				  void *start, void *end)
+{
+	if (!mapping->is_smmu_pt_coherent)
+		dmac_clean_range(start, end);
+}
+
 /*
  * Checks if the allocated range (ending at @end) covered the upcoming
  * stale bit.  We don't need to know exactly where the range starts since
@@ -302,8 +309,7 @@
 	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
 		goto fail_free_iova;
 
-	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
-		dmac_clean_range(pmd, pmd + nptes);
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
 
 	spin_unlock_irqrestore(&mapping->lock, flags);
 	return iova + offset_from_phys_to_map;
@@ -333,8 +339,7 @@
 
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(pmd, len);
-	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
-		dmac_clean_range(pmd, pmd + nptes);
+	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
 	__fast_smmu_free_iova(mapping, iova, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
@@ -489,7 +494,7 @@
 			/* TODO: unwind previously successful mappings */
 			goto out_free_iova;
 		}
-		dmac_clean_range(ptep, ptep + nptes);
+		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
 		iova_iter += miter.length;
 	}
 	sg_miter_stop(&miter);
@@ -511,7 +516,7 @@
 	spin_lock_irqsave(&mapping->lock, flags);
 	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
 	av8l_fast_unmap_public(ptep, size);
-	dmac_clean_range(ptep, ptep + count);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
 out_free_iova:
 	__fast_smmu_free_iova(mapping, dma_addr, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
@@ -544,7 +549,7 @@
 	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
 	av8l_fast_unmap_public(ptep, size);
-	dmac_clean_range(ptep, ptep + count);
+	fast_dmac_clean_range(mapping, ptep, ptep + count);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 	__fast_smmu_free_pages(pages, count);
@@ -719,6 +724,10 @@
 	}
 	mapping->fast->pgtbl_pmds = info.pmds;
 
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+				  &mapping->fast->is_smmu_pt_coherent))
+		return -EINVAL;
+
 	mapping->fast->notifier.notifier_call = fast_smmu_notify;
 	av8l_register_notify(&mapping->fast->notifier);