iommu: arm-smmu: Preallocate memory for map operation

The following page allocation failure has been observed during an
arm_smmu_map() call:

page allocation failure: order:0, mode:0x2088020(GFP_ATOMIC|__GFP_ZERO)
 Call trace:
 [<ffffff80080f15c8>] dump_backtrace+0x0/0x248
 [<ffffff80080f1894>] show_stack+0x18/0x28
 [<ffffff8008484984>] dump_stack+0x98/0xc0
 [<ffffff8008231b0c>] warn_alloc+0x114/0x134
 [<ffffff8008231f7c>] __alloc_pages_nodemask+0x3e8/0xd30
 [<ffffff8008232b2c>] alloc_pages_exact+0x4c/0xa4
 [<ffffff800866bec4>] arm_smmu_alloc_pages_exact+0x188/0x1bc
 [<ffffff8008664b28>] io_pgtable_alloc_pages_exact+0x30/0xa0
 [<ffffff8008664ff8>] __arm_lpae_alloc_pages+0x40/0x1c8
 [<ffffff8008665cb4>] __arm_lpae_map+0x224/0x3b4
 [<ffffff8008665b98>] __arm_lpae_map+0x108/0x3b4
 [<ffffff8008666474>] arm_lpae_map+0x78/0x9c
 [<ffffff800866aed4>] arm_smmu_map+0x80/0xdc
 [<ffffff800866015c>] iommu_map+0x118/0x284
 [<ffffff8008c66294>] cam_smmu_alloc_firmware+0x188/0x3c0
 [<ffffff8008cc8afc>] cam_icp_mgr_hw_open+0x88/0x874
 [<ffffff8008cca030>] cam_icp_mgr_acquire_hw+0x2d4/0xc9c
 [<ffffff8008c5fe84>] cam_context_acquire_dev_to_hw+0xb0/0x26c
 [<ffffff8008cd0ce0>] __cam_icp_acquire_dev_in_available+0x1c/0xf0
 [<ffffff8008c5ea98>] cam_context_handle_acquire_dev+0x5c/0x1a8
 [<ffffff8008c619b4>] cam_node_handle_ioctl+0x30c/0xdc8
 [<ffffff8008c62640>] cam_subdev_compat_ioctl+0xe4/0x1dc
 [<ffffff8008bcf8bc>] subdev_compat_ioctl32+0x40/0x68
 [<ffffff8008bd3858>] v4l2_compat_ioctl32+0x64/0x1780

The io-pgtable code allocates pagetable memory with GFP_ATOMIC, since
arm_smmu_map() calls into it while holding the pgtbl_lock spinlock, and
under memory pressure such atomic allocations can fail.

Preallocate the required memory with GFP_KERNEL when the iommu domain
allows sleeping. The number of pagetable pages needed can be derived
from the mapping size alone, so factor the existing scatterlist-based
preallocation helper into a size-based one and call it from
arm_smmu_map() as well.
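
As a minimal, self-contained sketch of the resulting pattern (the
names prealloc_pgtable_pages() and release_prealloc_pages() below are
illustrative only, not part of arm-smmu.c):

/*
 * Sketch of the preallocate/splice/release scheme; names are
 * hypothetical and do not appear in the driver.
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Fill @pool with @nr zeroed order-0 pages; may sleep (GFP_KERNEL). */
static void prealloc_pgtable_pages(struct list_head *pool, u32 nr)
{
	u32 i;

	for (i = 0; i < nr; i++) {
		struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);

		if (!page)
			break;	/* map falls back to GFP_ATOMIC */
		list_add(&page->lru, pool);
	}
}

/* Free whatever the map operation did not take from the pool. */
static void release_prealloc_pages(struct list_head *pool)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, pool, lru) {
		list_del(&page->lru);
		__free_pages(page, 0);
	}
}

At map time the locally preallocated list is spliced into
smmu_domain->nonsecure_pool while pgtbl_lock is held, so the io-pgtable
allocator can draw pages from the pool instead of calling the page
allocator with GFP_ATOMIC; unconsumed pages are spliced back out and
freed once the lock is dropped.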

Change-Id: I96194a4fabd21cc1d685e4f12afe8fbdd4768ac2
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fc949fe..40d4a2c 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2481,22 +2481,16 @@
 }
 
 static void arm_smmu_prealloc_memory(struct arm_smmu_domain *smmu_domain,
-					struct scatterlist *sgl, int nents,
-					struct list_head *pool)
+					size_t size, struct list_head *pool)
 {
-	u32 nr = 0;
 	int i;
-	size_t size = 0;
-	struct scatterlist *sg;
+	u32 nr = 0;
 	struct page *page;
 
 	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
 			arm_smmu_has_secure_vmid(smmu_domain))
 		return;
 
-	for_each_sg(sgl, sg, nents, i)
-		size += sg->length;
-
 	/* number of 2nd level pagetable entries */
 	nr += round_up(size, SZ_1G) >> 30;
 	/* number of 3rd level pagetable entries */
@@ -2511,16 +2505,32 @@
 	}
 }
 
+static void arm_smmu_prealloc_memory_sg(struct arm_smmu_domain *smmu_domain,
+					struct scatterlist *sgl, int nents,
+					struct list_head *pool)
+{
+	int i;
+	size_t size = 0;
+	struct scatterlist *sg;
+
+	if ((smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC)) ||
+			arm_smmu_has_secure_vmid(smmu_domain))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		size += sg->length;
+
+	arm_smmu_prealloc_memory(smmu_domain, size, pool);
+}
+
 static void arm_smmu_release_prealloc_memory(
 		struct arm_smmu_domain *smmu_domain, struct list_head *list)
 {
 	struct page *page, *tmp;
-	u32 remaining = 0;
 
 	list_for_each_entry_safe(page, tmp, list, lru) {
 		list_del(&page->lru);
 		__free_pages(page, 0);
-		remaining++;
 	}
 }
 
@@ -2602,6 +2612,7 @@
 	unsigned long flags;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+	LIST_HEAD(nonsecure_pool);
 
 	if (!ops)
 		return -ENODEV;
@@ -2609,15 +2620,19 @@
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map(domain, iova, paddr, size, prot);
 
+	arm_smmu_prealloc_memory(smmu_domain, size, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	list_splice_init(&nonsecure_pool, &smmu_domain->nonsecure_pool);
 	ret = ops->map(ops, iova, paddr, size, prot);
+	list_splice_init(&smmu_domain->nonsecure_pool, &nonsecure_pool);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
 	arm_smmu_assign_table(smmu_domain);
 	arm_smmu_secure_domain_unlock(smmu_domain);
 
+	arm_smmu_release_prealloc_memory(smmu_domain, &nonsecure_pool);
 	return ret;
 }
 
@@ -2695,7 +2710,7 @@
 	if (arm_smmu_is_slave_side_secure(smmu_domain))
 		return msm_secure_smmu_map_sg(domain, iova, sg, nents, prot);
 
-	arm_smmu_prealloc_memory(smmu_domain, sg, nents, &nonsecure_pool);
+	arm_smmu_prealloc_memory_sg(smmu_domain, sg, nents, &nonsecure_pool);
 	arm_smmu_secure_domain_lock(smmu_domain);
 
 	__saved_iova_start = iova;