gpu: ion: Remove protection of heap on first alloc

ION protects a heap when the first secure allocation is made
and unprotects it when the last secure allocation is freed.
In addition, ION provides a separate API for protecting and
unprotecting a heap, which lets clients control exactly when
the heap is protected.

The implicit protection on first allocation and unprotection on
last free is no longer needed. Clients require finer control over
this timing and therefore use the separate API instead, as sketched
below.
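For reference, a minimal sketch of the intended client usage pattern,
where the heap is protected explicitly before secure buffers are
allocated and unprotected only after all of them are freed. The
function, flag, and heap-id names (msm_ion_secure_heap(),
msm_ion_unsecure_heap(), ION_CP_MM_HEAP_ID, ION_SECURE) and the
ion_alloc() signature are assumptions about the surrounding msm ION
tree and are not part of this patch:

	/* Headers assumed for this tree. */
	#include <linux/err.h>
	#include <linux/ion.h>
	#include <mach/ion.h>

	static int example_secure_session(struct ion_client *client)
	{
		struct ion_handle *handle;
		int ret;

		/* Assumed API: protect the CP heap up front. */
		ret = msm_ion_secure_heap(ION_CP_MM_HEAP_ID);
		if (ret)
			return ret;

		handle = ion_alloc(client, SZ_1M, SZ_4K,
				   ION_HEAP(ION_CP_MM_HEAP_ID) | ION_SECURE);
		if (IS_ERR_OR_NULL(handle)) {
			msm_ion_unsecure_heap(ION_CP_MM_HEAP_ID);
			return -ENOMEM;
		}

		/* ... use the secure buffer ... */

		ion_free(client, handle);

		/* Assumed API: unprotect when the client decides it is safe. */
		return msm_ion_unsecure_heap(ION_CP_MM_HEAP_ID);
	}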

Remove the heap protection that occurs when the first secure
allocation is made, and remove the unprotection of the heap that
occurs when the last secure allocation is freed.

Change-Id: I80254c3a79399cd4900a389dcb535d843d6dfa80
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 16ace6f..293a7a5 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -43,7 +43,7 @@
  * @secure_base:	Base address used when securing a heap that is shared.
  * @secure_size:	Size used when securing a heap that is shared.
  * @lock:	mutex to protect shared access.
- * @heap_secured:	Identifies the heap_id as secure or not.
+ * @heap_protected:	Indicates whether heap has been protected or not.
  * @allocated_bytes:	the total number of allocated bytes from the pool.
  * @total_size:	the total size of the memory pool.
  * @request_region:	function pointer to call when first mapping of memory
@@ -55,7 +55,6 @@
  *		kernel space.
  * @umap_count:	the total number of times this heap has been mapped in
  *		user space.
- * @alloc_count:the total number of times this heap has been allocated
  */
 struct ion_cp_heap {
 	struct ion_heap heap;
@@ -65,7 +64,7 @@
 	ion_phys_addr_t secure_base;
 	size_t secure_size;
 	struct mutex lock;
-	unsigned int heap_secured;
+	unsigned int heap_protected;
 	unsigned long allocated_bytes;
 	unsigned long total_size;
 	int (*request_region)(void *);
@@ -73,12 +72,11 @@
 	void *bus_id;
 	unsigned long kmap_count;
 	unsigned long umap_count;
-	unsigned long alloc_count;
 };
 
 enum {
-	NON_SECURED_HEAP = 0,
-	SECURED_HEAP = 1,
+	HEAP_NOT_PROTECTED = 0,
+	HEAP_PROTECTED = 1,
 };
 
 static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
@@ -98,14 +96,14 @@
 		container_of(heap, struct ion_cp_heap, heap);
 	int ret_value = 0;
 
-	if (cp_heap->heap_secured == NON_SECURED_HEAP) {
-		int ret_value = ion_cp_protect_mem(cp_heap->secure_base,
+	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
 				cp_heap->secure_size, cp_heap->permission_type);
 		if (ret_value) {
 			pr_err("Failed to protect memory for heap %s - "
 				"error code: %d\n", heap->name, ret_value);
 		} else {
-			cp_heap->heap_secured = SECURED_HEAP;
+			cp_heap->heap_protected = HEAP_PROTECTED;
 			pr_debug("Protected heap %s @ 0x%x\n",
 				heap->name, (unsigned int) cp_heap->base);
 		}
@@ -122,7 +120,7 @@
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 
-	if (cp_heap->heap_secured == SECURED_HEAP) {
+	if (cp_heap->heap_protected == HEAP_PROTECTED) {
 		int error_code = ion_cp_unprotect_mem(
 			cp_heap->secure_base, cp_heap->secure_size,
 			cp_heap->permission_type);
@@ -130,7 +128,7 @@
 			pr_err("Failed to un-protect memory for heap %s - "
 				"error code: %d\n", heap->name, error_code);
 		} else  {
-			cp_heap->heap_secured = NON_SECURED_HEAP;
+			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
 			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
 				(unsigned int) cp_heap->base);
 		}
@@ -149,8 +147,7 @@
 		container_of(heap, struct ion_cp_heap, heap);
 
 	mutex_lock(&cp_heap->lock);
-
-	if (!secure_allocation && cp_heap->heap_secured == SECURED_HEAP) {
+	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
 		mutex_unlock(&cp_heap->lock);
 		pr_err("ION cannot allocate un-secure memory from protected"
 			" heap %s\n", heap->name);
@@ -165,13 +162,7 @@
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
-	if (secure_allocation && ion_cp_protect(heap)) {
-		mutex_unlock(&cp_heap->lock);
-		return ION_CP_ALLOCATE_FAIL;
-	}
-
 	cp_heap->allocated_bytes += size;
-	++cp_heap->alloc_count;
 	mutex_unlock(&cp_heap->lock);
 
 	offset = gen_pool_alloc_aligned(cp_heap->pool,
@@ -189,11 +180,6 @@
 				cp_heap->allocated_bytes, size);
 
 		cp_heap->allocated_bytes -= size;
-		--cp_heap->alloc_count;
-
-		if (cp_heap->alloc_count == 0)
-			ion_cp_unprotect(heap);
-
 		mutex_unlock(&cp_heap->lock);
 
 		return ION_CP_ALLOCATE_FAIL;
@@ -213,12 +199,7 @@
 	gen_pool_free(cp_heap->pool, addr, size);
 
 	mutex_lock(&cp_heap->lock);
-
 	cp_heap->allocated_bytes -= size;
-	--cp_heap->alloc_count;
-
-	if (cp_heap->alloc_count == 0)
-		ion_cp_unprotect(heap);
 	mutex_unlock(&cp_heap->lock);
 }
 
@@ -248,30 +229,6 @@
 	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
 }
 
-
-/**
- * Checks if user space mapping is allowed.
- * NOTE: Will increment the mapping count if
- * mapping is allowed.
- * Will fail mapping if heap is secured.
- */
-static unsigned int is_user_mapping_allowed(struct ion_heap *heap)
-{
-	struct ion_cp_heap *cp_heap =
-		container_of(heap, struct ion_cp_heap, heap);
-
-	mutex_lock(&cp_heap->lock);
-
-	if (cp_heap->heap_secured == SECURED_HEAP) {
-		mutex_unlock(&cp_heap->lock);
-		return 0;
-	}
-	++cp_heap->umap_count;
-
-	mutex_unlock(&cp_heap->lock);
-	return 1;
-}
-
 struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
@@ -304,7 +261,7 @@
 static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
 {
 	int ret_value = 0;
-	if ((cp_heap->umap_count+cp_heap->kmap_count) == 1)
+	if ((cp_heap->umap_count+cp_heap->kmap_count) == 0)
 		if (cp_heap->request_region)
 			ret_value = cp_heap->request_region(cp_heap->bus_id);
 	return ret_value;
@@ -328,36 +285,30 @@
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
-	void *ret_value;
+	void *ret_value = NULL;
 
 	mutex_lock(&cp_heap->lock);
+	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
+	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
+	      !ION_IS_CACHED(flags))) {
 
-	if (cp_heap->heap_secured == SECURED_HEAP && ION_IS_CACHED(flags)) {
-		pr_err("Unable to map secured heap %s as cached\n", heap->name);
-		mutex_unlock(&cp_heap->lock);
-		return NULL;
-	}
+		if (ion_cp_request_region(cp_heap)) {
+			mutex_unlock(&cp_heap->lock);
+			return NULL;
+		}
 
-	++cp_heap->kmap_count;
+		if (ION_IS_CACHED(flags))
+			ret_value = ioremap_cached(buffer->priv_phys,
+						   buffer->size);
+		else
+			ret_value = ioremap(buffer->priv_phys, buffer->size);
 
-	if (ion_cp_request_region(cp_heap)) {
-		--cp_heap->kmap_count;
-		mutex_unlock(&cp_heap->lock);
-		return NULL;
+		if (!ret_value)
+			ion_cp_release_region(cp_heap);
+		else
+			++cp_heap->kmap_count;
 	}
 	mutex_unlock(&cp_heap->lock);
-
-	if (ION_IS_CACHED(flags))
-		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
-	else
-		ret_value = ioremap(buffer->priv_phys, buffer->size);
-
-	if (!ret_value) {
-		mutex_lock(&cp_heap->lock);
-		--cp_heap->kmap_count;
-		ion_cp_release_region(cp_heap);
-		mutex_unlock(&cp_heap->lock);
-	}
 	return ret_value;
 }
 
@@ -382,17 +333,15 @@
 			struct vm_area_struct *vma, unsigned long flags)
 {
 	int ret_value = -EAGAIN;
-	if (is_user_mapping_allowed(heap)) {
+	struct ion_cp_heap *cp_heap =
+		container_of(heap, struct ion_cp_heap, heap);
 
-		struct ion_cp_heap *cp_heap =
-			container_of(heap, struct ion_cp_heap, heap);
-
-		mutex_lock(&cp_heap->lock);
+	mutex_lock(&cp_heap->lock);
+	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
 		if (ion_cp_request_region(cp_heap)) {
 			mutex_unlock(&cp_heap->lock);
 			return -EINVAL;
 		}
-		mutex_unlock(&cp_heap->lock);
 
 		 if (ION_IS_CACHED(flags))
 			ret_value =  remap_pfn_range(vma, vma->vm_start,
@@ -407,13 +356,12 @@
 				vma->vm_end - vma->vm_start,
 				pgprot_noncached(vma->vm_page_prot));
 
-		 if (ret_value) {
-			mutex_lock(&cp_heap->lock);
-			--cp_heap->umap_count;
+		if (ret_value)
 			ion_cp_release_region(cp_heap);
-			mutex_unlock(&cp_heap->lock);
-		}
+		else
+			++cp_heap->umap_count;
 	}
+	mutex_unlock(&cp_heap->lock);
 	return ret_value;
 }
 
@@ -459,28 +407,25 @@
 {
 	unsigned long total_alloc;
 	unsigned long total_size;
-	unsigned long alloc_count;
 	unsigned long umap_count;
 	unsigned long kmap_count;
-	unsigned long heap_secured;
+	unsigned long heap_protected;
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 
 	mutex_lock(&cp_heap->lock);
 	total_alloc = cp_heap->allocated_bytes;
 	total_size = cp_heap->total_size;
-	alloc_count = cp_heap->alloc_count;
 	umap_count = cp_heap->umap_count;
 	kmap_count = cp_heap->kmap_count;
-	heap_secured = cp_heap->heap_secured == SECURED_HEAP;
+	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
 	mutex_unlock(&cp_heap->lock);
 
 	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
 	seq_printf(s, "total heap size: %lx\n", total_size);
-	seq_printf(s, "allocation count: %lx\n", alloc_count);
 	seq_printf(s, "umapping count: %lx\n", umap_count);
 	seq_printf(s, "kmapping count: %lx\n", kmap_count);
-	seq_printf(s, "secured heap: %s\n", heap_secured ? "Yes" : "No");
+	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
 
 	return 0;
 }
@@ -491,7 +436,14 @@
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 	mutex_lock(&cp_heap->lock);
-	ret_value = ion_cp_protect(heap);
+	if (cp_heap->umap_count == 0) {
+		ret_value = ion_cp_protect(heap);
+	} else {
+		pr_err("ION cannot secure heap with outstanding mappings: "
+		       "User space: %lu\n", cp_heap->umap_count);
+		ret_value = -EINVAL;
+	}
+
 	mutex_unlock(&cp_heap->lock);
 	return ret_value;
 }
@@ -545,13 +497,12 @@
 		goto destroy_pool;
 
 	cp_heap->allocated_bytes = 0;
-	cp_heap->alloc_count = 0;
 	cp_heap->umap_count = 0;
 	cp_heap->kmap_count = 0;
 	cp_heap->total_size = heap_data->size;
 	cp_heap->heap.ops = &cp_heap_ops;
 	cp_heap->heap.type = ION_HEAP_TYPE_CP;
-	cp_heap->heap_secured = NON_SECURED_HEAP;
+	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
 	cp_heap->secure_base = cp_heap->base;
 	cp_heap->secure_size = heap_data->size;
 	if (heap_data->extra_data) {