iommu/arm-smmu: Add new lock to protect assign and unassign list

Simultaneous maps and/or unmaps can race with each other: if the
assign list has multiple entries, one call may be in the middle of
the assign operation for one entry while the other call removes the
next element from the list and queues itself for the assign
operation. When the first call returns from assign and finds the
list empty, it returns to the caller without having completed the
assign on all the ptes allocated for that mapping. This results in
a page fault when the client actually accesses the buffer.
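
An illustrative interleaving of the race (simplified; the two calls
could be any combination of simultaneous map/unmap on the same
secure domain):

  call A                            call B
  ------                            ------
  arm_smmu_assign_table()
    assigning one list entry        arm_smmu_assign_table()
    (hyp call in progress)            removes the next entry from
                                      the list, starts assigning it
    sees the list empty, returns
  map() returns to A's client,
  but A's remaining pte is still
  being assigned by call B
  A's client touches the buffer  -->  page fault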

Protect the assign and unassign lists with a mutex to ensure that
the operation is serialized and control does not return to the
client before all the ptes have been correctly assigned.
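
With the lock in place the map path, condensed from the hunk below,
serializes the page table update and the hyp assignment end to end
(the mutex is only taken for secure domains):

    arm_smmu_secure_domain_lock(smmu_domain); /* no-op for non-secure domains */

    spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
    ret = ops->map(ops, iova, paddr, size, prot);
    spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

    /* every pte queued on pte_info_list is assigned before returning */
    arm_smmu_assign_table(smmu_domain);
    arm_smmu_secure_domain_unlock(smmu_domain);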

Change-Id: Idaf894671fee9fc9c8d339bfa17344b68dd0ad77
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 9f29abd..fab08a9 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -473,6 +473,7 @@
 	u32				secure_vmid;
 	struct list_head		pte_info_list;
 	struct list_head		unassign_list;
+	struct mutex			assign_lock;
 	struct iommu_domain		domain;
 };
 
@@ -514,7 +515,7 @@
 
 static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
 static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
-static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
 static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
 
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -543,6 +544,23 @@
 	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
 }
 
+static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return (smmu_domain->secure_vmid != VMID_INVAL);
+}
+
+static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_domain_secure(smmu_domain))
+		mutex_lock(&smmu_domain->assign_lock);
+}
+
+static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_domain_secure(smmu_domain))
+		mutex_unlock(&smmu_domain->assign_lock);
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -1551,7 +1569,9 @@
 	 * assign any page table memory that might have been allocated
 	 * during alloc_io_pgtable_ops
 	 */
+	arm_smmu_secure_domain_lock(smmu_domain);
 	arm_smmu_assign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 
 	/* Update the domain's page sizes to reflect the page table format */
 	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
@@ -1622,7 +1642,9 @@
 		arm_smmu_free_asid(domain);
 		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 		arm_smmu_power_off(smmu);
+		arm_smmu_secure_domain_lock(smmu_domain);
 		arm_smmu_unassign_table(smmu_domain);
+		arm_smmu_secure_domain_unlock(smmu_domain);
 		return;
 	}
 
@@ -1639,7 +1661,9 @@
 	}
 
 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+	arm_smmu_secure_domain_lock(smmu_domain);
 	arm_smmu_unassign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
 
 	arm_smmu_power_off(smmu);
@@ -1673,6 +1697,7 @@
 	smmu_domain->secure_vmid = VMID_INVAL;
 	INIT_LIST_HEAD(&smmu_domain->pte_info_list);
 	INIT_LIST_HEAD(&smmu_domain->unassign_list);
+	mutex_init(&smmu_domain->assign_lock);
 
 	return &smmu_domain->domain;
 }
@@ -1854,19 +1879,18 @@
 	}
 }
 
-static void arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
 {
-	int ret;
+	int ret = 0;
 	int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
 	int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
 	int source_vmid = VMID_HLOS;
 	struct arm_smmu_pte_info *pte_info, *temp;
 
-	if (smmu_domain->secure_vmid == VMID_INVAL)
-		return;
+	if (!arm_smmu_is_domain_secure(smmu_domain))
+		return ret;
 
-	list_for_each_entry(pte_info, &smmu_domain->pte_info_list,
-								entry) {
+	list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
 		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
 				      PAGE_SIZE, &source_vmid, 1,
 				      dest_vmids, dest_perms, 2);
@@ -1879,6 +1903,7 @@
 		list_del(&pte_info->entry);
 		kfree(pte_info);
 	}
+	return ret;
 }
 
 static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
@@ -1889,7 +1914,7 @@
 	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
 	struct arm_smmu_pte_info *pte_info, *temp;
 
-	if (smmu_domain->secure_vmid == VMID_INVAL)
+	if (!arm_smmu_is_domain_secure(smmu_domain))
 		return;
 
 	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
@@ -1913,7 +1938,7 @@
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_pte_info *pte_info;
 
-	if (smmu_domain->secure_vmid == VMID_INVAL) {
+	if (!arm_smmu_is_domain_secure(smmu_domain)) {
 		free_pages_exact(addr, size);
 		return;
 	}
@@ -1932,7 +1957,7 @@
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_pte_info *pte_info;
 
-	if (smmu_domain->secure_vmid == VMID_INVAL)
+	if (!arm_smmu_is_domain_secure(smmu_domain))
 		return -EINVAL;
 
 	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
@@ -2029,11 +2054,14 @@
 	if (!ops)
 		return -ENODEV;
 
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	ret = ops->map(ops, iova, paddr, size, prot);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
 	arm_smmu_assign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 
 	return ret;
 }
@@ -2053,6 +2081,8 @@
 	if (ret)
 		return ret;
 
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	ret = ops->unmap(ops, iova, size);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
@@ -2066,6 +2096,7 @@
 	arm_smmu_assign_table(smmu_domain);
 	/* Also unassign any pages that were free'd during unmap */
 	arm_smmu_unassign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	return ret;
 }