iommu: add support for forced IOVA guard page

Add support for a forced guard page, appended at the end of each IOVA
mapping range. Clients need to set the DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE
attribute via set_attr to enable it.
Change-Id: I2b3d221b5f2331f35518612a87997d3acc873821
Signed-off-by: Prakash Gupta <guptap@codeaurora.org>
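
For context, a client is expected to enable the forced guard page on the
domain before attaching. The sketch below is illustrative only, not part
of the patch; dev, the bus, base and size are placeholders, and it
assumes the arm64 arm_iommu_* API used in this tree:

    /* Hedged client sketch: create mapping, set attribute, then attach. */
    struct dma_iommu_mapping *mapping;
    int force_guard = 1;
    int ret;

    mapping = arm_iommu_create_mapping(&platform_bus_type,
                                       0x10000000, SZ_256M);
    if (IS_ERR(mapping))
        return PTR_ERR(mapping);

    /* Must happen before attach; the SMMU driver rejects it with
     * -EBUSY once the domain is live (see arm-smmu.c below).
     */
    ret = iommu_domain_set_attr(mapping->domain,
                                DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
                                &force_guard);
    if (ret)
        goto release;

    ret = arm_iommu_attach_device(dev, mapping);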
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index cfd49b2..fefec0b 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -25,6 +25,7 @@
dma_addr_t base;
u32 min_iova_align;
struct page *guard_page;
+ u32 force_guard_page_len;
struct dma_fast_smmu_mapping *fast;
};
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 31d4684..42dde2d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1183,7 +1183,8 @@
size = PAGE_ALIGN(size);
if (mapping->min_iova_align)
- guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ guard_len = ALIGN(size + mapping->force_guard_page_len,
+ mapping->min_iova_align) - size;
else
guard_len = 0;
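
With the forced guard page folded into the round-up, guard_len is now at
least one page even when size is already aligned. A quick userspace-style
check of the arithmetic (all values assumed; ALIGN() is the usual kernel
power-of-two round-up):

    #include <stdio.h>

    /* Kernel-style power-of-two round-up, matching the ALIGN() above. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long size = 0x5000;        /* page-aligned mapping size */
        unsigned long force_guard = 0x1000; /* one 4 KiB forced guard page */
        unsigned long min_align = 0x1000;   /* PAGE_SIZE default, this patch */
        unsigned long guard_len =
            ALIGN(size + force_guard, min_align) - size;

        printf("guard_len = 0x%lx\n", guard_len); /* prints 0x1000 */
        return 0;
    }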
@@ -1231,12 +1232,14 @@
addr = addr & PAGE_MASK;
size = PAGE_ALIGN(size);
- if (mapping->min_iova_align) {
- guard_len = ALIGN(size, mapping->min_iova_align) - size;
- iommu_unmap(mapping->domain, addr + size, guard_len);
- } else {
+ if (mapping->min_iova_align)
+ guard_len = ALIGN(size + mapping->force_guard_page_len,
+ mapping->min_iova_align) - size;
+ else
guard_len = 0;
- }
+
+ if (guard_len)
+ iommu_unmap(mapping->domain, addr + size, guard_len);
start = (addr - mapping->base) >> PAGE_SHIFT;
count = (size + guard_len) >> PAGE_SHIFT;
@@ -1987,21 +1990,30 @@
unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
int vmid = VMID_HLOS;
int min_iova_align = 0;
+ int force_iova_guard_page = 0;
iommu_domain_get_attr(mapping->domain,
DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
&min_iova_align);
iommu_domain_get_attr(mapping->domain,
DOMAIN_ATTR_SECURE_VMID, &vmid);
+ iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
+ &force_iova_guard_page);
+
if (vmid >= VMID_LAST || vmid < 0)
vmid = VMID_HLOS;
- if (min_iova_align) {
- mapping->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
- mapping->guard_page = arm_smmu_errata_get_guard_page(vmid);
- if (!mapping->guard_page)
- return -ENOMEM;
- }
+ mapping->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
+ PAGE_SIZE;
+
+ if (force_iova_guard_page)
+ mapping->force_guard_page_len = PAGE_SIZE;
+
+ mapping->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!mapping->guard_page)
+ return -ENOMEM;
mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 40d4a2c..3f20364 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -542,6 +542,7 @@
bool qsmmuv500_errata1_init;
bool qsmmuv500_errata1_client;
bool qsmmuv500_errata2_min_align;
+ bool is_force_guard_page;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -3245,6 +3246,12 @@
*((int *)data) = smmu_domain->qsmmuv500_errata2_min_align;
ret = 0;
break;
+ case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE));
+ ret = 0;
+ break;
+
default:
ret = -ENODEV;
break;
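
Getting the attribute back yields a plain 0/1 from the attribute bitmap.
A hedged sketch of querying it (domain is a placeholder):

    int force = 0;

    if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
                               &force))
        pr_debug("forced IOVA guard page: %s\n", force ? "on" : "off");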
@@ -3447,6 +3454,28 @@
1 << DOMAIN_ATTR_CB_STALL_DISABLE;
ret = 0;
break;
+
+ case DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE: {
+ int force_iova_guard_page = *((int *)data);
+
+ if (smmu_domain->smmu != NULL) {
+ dev_err(smmu_domain->smmu->dev,
+ "cannot change force guard page attribute while attached\n");
+ ret = -EBUSY;
+ break;
+ }
+
+ if (force_iova_guard_page)
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE;
+ else
+ smmu_domain->attributes &=
+ ~(1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE);
+
+ ret = 0;
+ break;
+ }
+
default:
ret = -ENODEV;
}
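
The -EBUSY path above makes the attribute ordering-sensitive: once the
domain is attached (smmu_domain->smmu is set), the request is refused.
Illustrative only, with placeholder names:

    /* Wrong order -- attach first, then set_attr: */
    arm_iommu_attach_device(dev, mapping);  /* domain now backed by HW */
    ret = iommu_domain_set_attr(mapping->domain,
                                DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE, &force);
    /* ret == -EBUSY; set the attribute before attaching instead. */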
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 57ae0dd..36c84df 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -45,6 +45,7 @@
spinlock_t msi_lock;
u32 min_iova_align;
struct page *guard_page;
+ u32 force_guard_page_len;
};
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
@@ -130,20 +131,31 @@
struct iommu_dma_cookie *cookie = domain->iova_cookie;
int vmid = VMID_HLOS;
int min_iova_align = 0;
+ int force_iova_guard_page = 0;
+
iommu_domain_get_attr(domain,
DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
&min_iova_align);
iommu_domain_get_attr(domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
+ iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
+ &force_iova_guard_page);
+
if (vmid >= VMID_LAST || vmid < 0)
vmid = VMID_HLOS;
- if (min_iova_align) {
- cookie->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
- cookie->guard_page = arm_smmu_errata_get_guard_page(vmid);
- if (!cookie->guard_page)
- return -ENOMEM;
- }
+ cookie->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
+ PAGE_SIZE;
+
+ if (force_iova_guard_page)
+ cookie->force_guard_page_len = PAGE_SIZE;
+
+ cookie->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!cookie->guard_page)
+ return -ENOMEM;
+
return 0;
}
@@ -244,7 +256,8 @@
dma_addr_t ret_iova;
if (cookie->min_iova_align)
- guard_len = ALIGN(size, cookie->min_iova_align) - size;
+ guard_len = ALIGN(size + cookie->force_guard_page_len,
+ cookie->min_iova_align) - size;
else
guard_len = 0;
iova_len = (size + guard_len) >> shift;
@@ -290,12 +303,14 @@
unsigned long shift = iova_shift(iovad);
unsigned long guard_len;
- if (cookie->min_iova_align) {
- guard_len = ALIGN(size, cookie->min_iova_align) - size;
- iommu_unmap(domain, iova + size, guard_len);
- } else {
+ if (cookie->min_iova_align)
+ guard_len = ALIGN(size + cookie->force_guard_page_len,
+ cookie->min_iova_align) - size;
+ else
guard_len = 0;
- }
+
+ if (guard_len)
+ iommu_unmap(domain, iova + size, guard_len);
free_iova_fast(iovad, iova >> shift, (size + guard_len) >> shift);
}
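
The free side releases the mapping and its guard region as a single IOVA
range. Sanity-checking the accounting with assumed numbers (4 KiB
granule, so iova_shift() == 12):

    #include <stdio.h>

    int main(void)
    {
        unsigned long shift = 12;  /* iova_shift(iovad), assumed 4 KiB */
        unsigned long size = 0x5000, guard_len = 0x1000;

        /* 6 pfns freed: 5 for the mapping plus 1 for the guard page,
         * matching the (size + guard_len) >> shift allocated earlier.
         */
        printf("pfns freed = %lu\n", (size + guard_len) >> shift);
        return 0;
    }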
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index eac7b41..7e6287c 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -163,7 +163,8 @@
dma_addr_t iova;
if (mapping->min_iova_align)
- guard_len = ALIGN(size, mapping->min_iova_align) - size;
+ guard_len = ALIGN(size + mapping->force_guard_page_len,
+ mapping->min_iova_align) - size;
else
guard_len = 0;
@@ -311,12 +312,15 @@
unsigned long nbits;
unsigned long guard_len;
- if (mapping->min_iova_align) {
- guard_len = ALIGN(size, mapping->min_iova_align) - size;
- iommu_unmap(mapping->domain, iova + size, guard_len);
- } else {
+ if (mapping->min_iova_align)
+ guard_len = ALIGN(size + mapping->force_guard_page_len,
+ mapping->min_iova_align) - size;
+ else
guard_len = 0;
- }
+
+ if (guard_len)
+ iommu_unmap(mapping->domain, iova + size, guard_len);
+
nbits = (size + guard_len) >> FAST_PAGE_SHIFT;
@@ -898,20 +902,30 @@
struct dma_fast_smmu_mapping *fast = mapping->fast;
int vmid = VMID_HLOS;
int min_iova_align = 0;
+ int force_iova_guard_page = 0;
iommu_domain_get_attr(mapping->domain,
- DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
- &min_iova_align);
+ DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+ &min_iova_align);
iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
+ iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
+ &force_iova_guard_page);
+
if (vmid >= VMID_LAST || vmid < 0)
vmid = VMID_HLOS;
- if (min_iova_align) {
- fast->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
- fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
- if (!fast->guard_page)
- return -ENOMEM;
- }
+ fast->min_iova_align = (min_iova_align) ? ARM_SMMU_MIN_IOVA_ALIGN :
+ PAGE_SIZE;
+
+ if (force_iova_guard_page)
+ fast->force_guard_page_len = PAGE_SIZE;
+
+ fast->guard_page = arm_smmu_errata_get_guard_page(vmid);
+ if (!fast->guard_page)
+ return -ENOMEM;
+
return 0;
}
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index e9dabab..e370b43 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
u32 min_iova_align;
struct page *guard_page;
+ u32 force_guard_page_len;
unsigned int bitmap_size;
unsigned long *bitmap;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f25acfc..acbc605 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -148,6 +148,7 @@
DOMAIN_ATTR_CB_STALL_DISABLE,
DOMAIN_ATTR_UPSTREAM_IOVA_ALLOCATOR,
DOMAIN_ATTR_MMU500_ERRATA_MIN_ALIGN,
+ DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE,
DOMAIN_ATTR_MAX,
};
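
Note that the new enum value doubles as a bit position in
smmu_domain->attributes (see the 1 << DOMAIN_ATTR_FORCE_IOVA_GUARD_PAGE
uses above), so the enum must keep fitting in that field. A compile-time
check along these lines, not part of this patch and assuming a 32-bit
attributes field, would make that explicit:

    /* Hypothetical guard; assumes smmu_domain->attributes is a u32. */
    BUILD_BUG_ON(DOMAIN_ATTR_MAX > 32);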