/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @request_region:	function pointer to call when first mapping of memory
 *			occurs.
 * @release_region:	function pointer to call when last mapping of memory
 *			is unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped in
 *			kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped in
 *			kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *			user space.
 * @iommu_iova:	saved iova when mapping full heap at once.
 * @iommu_partition:	partition used to map full heap.
 * @reusable:	indicates if the memory should be reused via fmem.
 * @reserved_vrange:	reserved virtual address range for use with fmem.
 * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
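 * @protect_cnt:	number of outstanding protect requests (see
 *			ion_cp_protect/ion_cp_unprotect).
 * @cpu_addr:	address returned by dma_alloc_attrs() for the CMA backing.
 * @heap_size:	size used when allocating the CMA backing memory.
 * @handle:	DMA handle of the CMA backing memory.
 * @cma:	set to 1 if this heap is backed by CMA, 0 otherwise.
 * @allow_non_secure_allocation: set to 1 to allow non-secure allocations
 *			from this heap.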
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

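/*
 * Allocate the CMA backing memory for the heap on first use.  The DMA
 * allocation is retried a few times (sleeping between attempts) and the
 * resulting physical range is handed to a gen_pool for carving out buffers.
 */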
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

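/* Release the CMA backing memory and the gen_pool created on first use. */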
static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

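/*
 * Called when the heap goes from empty to non-empty: moves a reusable (fmem)
 * heap into C state and allocates the CMA backing for CMA heaps.  Returns 0
 * on success and 1 on failure.
 */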
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

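/*
 * Called when the last allocation is freed: returns a reusable (fmem) heap
 * to T state and releases the CMA backing for CMA heaps.
 */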
static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is an unsecured heap. Also ensures that we are
 * in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap))
				goto out;

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is a secure heap. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

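/*
 * Carve an allocation out of the heap's gen_pool.  Non-secure requests are
 * rejected while the heap is protected (or when the heap only permits secure
 * allocations), and the first allocation triggers ion_on_first_alloc().
 * Returns a physical address on success or an allocate-fail sentinel.
 */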
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
			unsigned long size,
			unsigned long align,
			unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;
	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * If this is the first reusable allocation, transition
	 * the heap.
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

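/*
 * Tear down a whole-heap IOMMU mapping set up by iommu_map_all(), including
 * the extra 2x mapping used for the video domain.
 */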
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

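/*
 * Return a range to the gen_pool.  When the heap becomes empty this also
 * triggers ion_on_last_free() and tears down any whole-heap IOMMU mappings.
 */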
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		/* Don't leak the bookkeeping structure on failure. */
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

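/*
 * Build an sg_table describing the buffer.  Cached buffers are split into
 * PAGE_SIZE chunks, secure buffers whose size is 1MB-aligned use 1MB chunks,
 * and everything else is described as a single contiguous chunk.
 */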
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct sg_table *table;
	int ret, i, n_chunks;
	struct scatterlist *sg;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(buffer->size, chunk_size);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = buf->buffer + i * chunk_size;
		sg->length = chunk_size;
		sg->offset = 0;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
			get_mem_type(MT_DEVICE_CACHED) :
			get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

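/*
 * Kernel mapping of a buffer.  Protected heaps may only be mapped uncached.
 * Reusable heaps map through fmem, CMA heaps are vmap()ed, and everything
 * else is ioremapped.
 */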
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				/* Drop the region reference taken above. */
				ion_cp_release_region(cp_heap);
				mutex_unlock(&cp_heap->lock);
				return NULL;
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

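/*
 * Clean/invalidate/flush the kernel mapping of a buffer and, when an outer
 * cache is present, the corresponding physical range in the outer cache.
 */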
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

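/*
 * Map the entire heap into an IOMMU domain in 64K pages.  The video domain
 * is over-mapped to twice the heap size to account for the prefetch issue
 * in the video core.
 */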
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->base,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}

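/*
 * IOMMU mapping of a buffer.  If the whole heap is already (or should be)
 * mapped at once, the buffer reuses an offset into that mapping; otherwise
 * the buffer's sg_table is mapped on its own, with any extra iova length
 * backed by padding pages.
 */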
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buf->buffer;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buf->buffer - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buf->buffer - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear delayed map flag so that we don't interfere
			 * with this feature (we are already delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

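/*
 * Create a content-protection heap from platform data.  Non-CMA heaps get a
 * gen_pool seeded with the static carveout here; CMA heaps defer that work
 * to the first allocation (see allocate_heap_memory).
 */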
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->iommu_map_all =
				extra_data->iommu_map_all;
		cp_heap->iommu_2x_map_domain =
				extra_data->iommu_2x_map_domain;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = NULL;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
				heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}