/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"
/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap: the heap information structure
 * @pool: memory pool to allocate from.
 * @base: the base address of the memory pool.
 * @permission_type: Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base: Base address used when securing a heap that is shared.
 * @secure_size: Size used when securing a heap that is shared.
 * @lock: mutex to protect shared access.
 * @heap_protected: Indicates whether the heap has been protected or not.
 * @allocated_bytes: the total number of allocated bytes from the pool.
 * @total_size: the total size of the memory pool.
 * @heap_request_region: function pointer to call when the first mapping of
 *			memory occurs.
 * @heap_release_region: function pointer to call when the last mapping of
 *			memory is unmapped.
 * @bus_id: token used with request/release region.
 * @kmap_cached_count: the total number of times this heap has been mapped in
 *			kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped in
 *			kernel space (un-cached).
 * @umap_count: the total number of times this heap has been mapped in
 *			user space.
 * @iommu_iova: saved iova when mapping the full heap at once.
 * @iommu_partition: partition used to map the full heap.
 * @reusable: indicates if the memory should be reused via fmem.
 * @reserved_vrange: reserved virtual address range for use with fmem.
 * @iommu_map_all: Indicates whether we should map the whole heap into the
 *			IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt: reference count of outstanding protect (secure) requests.
 * @cpu_addr: kernel address of the CMA allocation backing the heap.
 * @heap_size: size of the heap, used for the CMA allocation.
 * @handle: DMA handle of the CMA allocation backing the heap.
 * @cma: set to 1 if the heap is backed by CMA, 0 otherwise.
 * @allow_non_secure_allocation: set to 1 if non-secure allocations are
 *			allowed from this heap.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};
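
/*
 * Usage sketch (a reading aid, not new behaviour): every heap callback
 * below receives the generic struct ion_heap and recovers its ion_cp_heap
 * container with container_of(), for example:
 *
 *	struct ion_cp_heap *cp_heap =
 *		container_of(heap, struct ion_cp_heap, heap);
 *
 * This works because the generic heap structure is embedded inside
 * struct ion_cp_heap rather than pointed to.
 */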

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}
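
/*
 * Notes on the allocation path above (a reading aid, not new behaviour):
 * gen_pool_create(12, -1) creates a pool whose minimum allocation order is
 * 12, so carveout space is handed out in multiples of 2^12 = 4096 bytes,
 * and the retry loop makes at most DMA_ALLOC_TRIES - 1 attempts with a
 * 20 ms sleep between CMA allocation failures.
 */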

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = 0;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is currently unprotected. Also ensures that
 * the heap is in the correct FMEM state if it is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is currently protected. Also ensures that
 * the heap is in the correct FMEM state if it is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}
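
/*
 * Usage sketch (derived from the callers below, not additional code):
 * protection is reference counted through protect_cnt, so nested
 * secure/unsecure requests pair up and only the outermost pair talks to
 * SCM:
 *
 *	mutex_lock(&cp_heap->lock);
 *	ion_cp_protect(heap, version, data);	// cnt 0 -> 1: protects
 *	ion_cp_protect(heap, version, data);	// cnt 1 -> 2: no SCM call
 *	ion_cp_unprotect(heap, version, data);	// cnt 2 -> 1: no SCM call
 *	ion_cp_unprotect(heap, version, data);	// cnt 1 -> 0: unprotects
 *	mutex_unlock(&cp_heap->lock);
 */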

ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;
	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * If this is the first allocation, transition the heap
	 * (fmem C-state and/or CMA backing).
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}
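
/*
 * Worked example for the alignment handling above (illustrative only):
 * a caller requesting size = 0x100000 with align = SZ_1M ends up in
 * gen_pool_alloc_aligned(pool, 0x100000, ilog2(SZ_1M)), i.e.
 * gen_pool_alloc_aligned(pool, 0x100000, 20), so the returned physical
 * offset is aligned to a 1 MB boundary. An align of 1 gives ilog2(1) = 0,
 * meaning no alignment constraint beyond the pool's 4 KB minimum order.
 */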

static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M)) {
		int n_chunks;
		int i;
		struct scatterlist *sg;

		/* Count number of 1MB chunks. Alignment is already checked. */
		n_chunks = buffer->size >> 20;

		ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
		if (ret)
			goto err0;

		for_each_sg(table->sgl, sg, table->nents, i) {
			sg_dma_address(sg) = buf->buffer + i * SZ_1M;
			sg->length = SZ_1M;
			sg->offset = 0;
		}
	} else {
		ret = sg_alloc_table(table, 1, GFP_KERNEL);
		if (ret)
			goto err0;

		table->sgl->length = buffer->size;
		table->sgl->offset = 0;
		table->sgl->dma_address = buf->buffer;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}
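
/*
 * Worked example for the table construction above (illustrative only):
 * a secure, 1 MB aligned buffer of 8 MB yields n_chunks = 8 MB >> 20 = 8,
 * so the sg_table describes eight 1 MB entries starting at buf->buffer,
 * buf->buffer + SZ_1M, and so on. Any other buffer is described by a
 * single sg entry covering the whole contiguous allocation.
 */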

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = 0;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}
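
/*
 * Usage sketch (matching ion_cp_heap_create() below, no new behaviour):
 * bus_id and the request/release hooks come from the platform data, e.g.
 * bus_id = extra_data->setup_region() and heap_request_region =
 * extra_data->request_region, so a platform can, for example, vote the
 * SMI memory region on around the first mapping and off again after the
 * last unmapping.
 */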

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			if (pages) {
				for (i = 0; i < npages; i++)
					pages[i] = phys_to_page(buf->buffer +
								i * PAGE_SIZE);
				ret_value = vmap(pages, npages, VM_IOREMAP,
							pgprot);
				vfree(pages);
			}
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
							vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
					   "FREE", last_end, data->addr - 1,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
				   client_name, data->addr,
				   data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				   last_end, end - 1, end - last_end,
				   end - last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
			cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->base,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
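
/*
 * Worked example for the video-domain overmapping above (illustrative
 * only): for a 64 MB heap mapped into iommu_2x_map_domain, virt_addr_len
 * becomes 128 MB. The first 64 MB of iova is mapped 1:1 onto the heap in
 * 64 KB pages and the second 64 MB is covered via msm_iommu_map_extra(),
 * so prefetching by the video core past the end of the heap does not
 * fault.
 */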

static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buf->buffer;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buf->buffer - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buf->buffer - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear the delayed-unmap flag so that we don't
			 * interfere with that feature (we are already
			 * delaying the unmap).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/*
	 * If we mapped the whole heap at once, wait to unmap until
	 * everything is freed.
	 */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

	return;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};
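
/*
 * Note on the ops table above (descriptive only): the ION core invokes
 * these hooks on behalf of clients. The per-buffer ion_cp_secure_buffer()
 * and ion_cp_unsecure_buffer() are not defined in this file; they are
 * assumed to be provided through "msm/ion_cp_common.h".
 */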

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->iommu_map_all =
				extra_data->iommu_map_all;
		cp_heap->iommu_2x_map_domain =
				extra_data->iommu_2x_map_domain;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = 0;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}
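
/*
 * Illustrative example (not from this file): a board file might describe a
 * CP heap with the ion_cp_heap_pdata fields consumed above, along the lines
 * of:
 *
 *	static struct ion_cp_heap_pdata cp_mm_ion_pdata = {
 *		.permission_type = IPT_TYPE_MM_CARVEOUT,
 *		.iommu_map_all = 1,
 *		.iommu_2x_map_domain = VIDEO_DOMAIN,
 *		.is_cma = 1,
 *		.allow_nonsecure_alloc = 1,
 *	};
 *
 * IPT_TYPE_MM_CARVEOUT and VIDEO_DOMAIN are assumed platform constants and
 * are not defined in this driver; the field names are the ones read in
 * ion_cp_heap_create().
 */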

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}