/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>
#include <asm/cacheflush.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include "msm/ion_cp_common.h"

Olav Haugan0a852512012-01-09 10:20:55 -080046/**
47 * struct ion_cp_heap - container for the heap and shared heap data
48
49 * @heap: the heap information structure
50 * @pool: memory pool to allocate from.
51 * @base: the base address of the memory pool.
52 * @permission_type: Identifier for the memory used by SCM for protecting
53 * and unprotecting memory.
Olav Haugan42ebe712012-01-10 16:30:58 -080054 * @secure_base: Base address used when securing a heap that is shared.
55 * @secure_size: Size used when securing a heap that is shared.
Olav Haugan0a852512012-01-09 10:20:55 -080056 * @lock: mutex to protect shared access.
Olav Hauganea66e7a2012-01-23 17:30:27 -080057 * @heap_protected: Indicates whether heap has been protected or not.
Olav Haugan0a852512012-01-09 10:20:55 -080058 * @allocated_bytes: the total number of allocated bytes from the pool.
59 * @total_size: the total size of the memory pool.
60 * @request_region: function pointer to call when first mapping of memory
61 * occurs.
62 * @release_region: function pointer to call when last mapping of memory
63 * unmapped.
64 * @bus_id: token used with request/release region.
Olav Haugan2a5404b2012-02-01 17:51:30 -080065 * @kmap_cached_count: the total number of times this heap has been mapped in
66 * kernel space (cached).
67 * @kmap_uncached_count:the total number of times this heap has been mapped in
68 * kernel space (un-cached).
Olav Haugan0a852512012-01-09 10:20:55 -080069 * @umap_count: the total number of times this heap has been mapped in
70 * user space.
Olav Haugan8726caf2012-05-10 15:11:35 -070071 * @iommu_iova: saved iova when mapping full heap at once.
72 * @iommu_partition: partition used to map full heap.
Laura Abbottcaafeea2011-12-13 11:43:10 -080073 * @reusable: indicates if the memory should be reused via fmem.
74 * @reserved_vrange: reserved virtual address range for use with fmem
Olav Haugan8726caf2012-05-10 15:11:35 -070075 * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
76 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
Olav Haugan85c95402012-05-30 17:32:37 -070077 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
78*/
Olav Haugan0a852512012-01-09 10:20:55 -080079struct ion_cp_heap {
80 struct ion_heap heap;
81 struct gen_pool *pool;
82 ion_phys_addr_t base;
83 unsigned int permission_type;
Olav Haugan42ebe712012-01-10 16:30:58 -080084 ion_phys_addr_t secure_base;
85 size_t secure_size;
Olav Haugan0a852512012-01-09 10:20:55 -080086 struct mutex lock;
Olav Hauganea66e7a2012-01-23 17:30:27 -080087 unsigned int heap_protected;
Olav Haugan0a852512012-01-09 10:20:55 -080088 unsigned long allocated_bytes;
89 unsigned long total_size;
Laura Abbottaedbe422012-08-03 17:06:22 -070090 int (*heap_request_region)(void *);
91 int (*heap_release_region)(void *);
Olav Haugan0a852512012-01-09 10:20:55 -080092 void *bus_id;
Olav Haugan2a5404b2012-02-01 17:51:30 -080093 unsigned long kmap_cached_count;
94 unsigned long kmap_uncached_count;
Olav Haugan0a852512012-01-09 10:20:55 -080095 unsigned long umap_count;
Olav Haugan8726caf2012-05-10 15:11:35 -070096 unsigned long iommu_iova[MAX_DOMAINS];
97 unsigned long iommu_partition[MAX_DOMAINS];
Laura Abbottcaafeea2011-12-13 11:43:10 -080098 int reusable;
99 void *reserved_vrange;
Olav Haugan8726caf2012-05-10 15:11:35 -0700100 int iommu_map_all;
101 int iommu_2x_map_domain;
Olav Haugan85c95402012-05-30 17:32:37 -0700102 unsigned int has_outer_cache;
Laura Abbottf68983e2012-06-13 16:23:23 -0700103 atomic_t protect_cnt;
Laura Abbott3180a5f2012-08-03 17:31:03 -0700104 void *cpu_addr;
105 size_t heap_size;
106 dma_addr_t handle;
107 int cma;
Mitchel Humpherys345f0232013-01-11 10:55:25 -0800108 int allow_non_secure_allocation;
Olav Haugan0a852512012-01-09 10:20:55 -0800109};
110
111enum {
Olav Hauganea66e7a2012-01-23 17:30:27 -0800112 HEAP_NOT_PROTECTED = 0,
113 HEAP_PROTECTED = 1,
Olav Haugan0a852512012-01-09 10:20:55 -0800114};
115
Laura Abbott3180a5f2012-08-03 17:31:03 -0700116#define DMA_ALLOC_TRIES 5
117
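/*
 * For CMA-backed heaps the memory is only reserved on first use:
 * dma_alloc_attrs() (with DMA_ATTR_NO_KERNEL_MAPPING) carves out the
 * whole heap, retrying with a short sleep between attempts, and the
 * resulting region is handed to a gen_pool with 4K (order 12)
 * allocation granularity.
 */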
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = 0;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is unprotected. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap))
				goto out;

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is protected. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

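/*
 * Reserve a physical range from the heap. The request is refused if the
 * heap is currently protected and the caller did not pass ION_SECURE,
 * or if plain (non-secure, non-force-contiguous) allocations are
 * disallowed for this heap. The first allocation triggers
 * ion_on_first_alloc() (FMEM C-state transition or CMA carve-out)
 * before the gen_pool is consulted.
 */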
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;
	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

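/*
 * Build an sg_table describing the physically contiguous buffer. The
 * chunk size defaults to the whole buffer, drops to PAGE_SIZE for
 * cached buffers, and uses 1M chunks for secure buffers whose size is
 * 1M aligned; each scatterlist entry simply advances through the
 * contiguous physical range.
 */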
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct sg_table *table;
	int ret, i, n_chunks;
	struct scatterlist *sg;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(buffer->size, chunk_size);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = buf->buffer + i * chunk_size;
		sg->length = chunk_size;
		sg->offset = 0;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = 0;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

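/*
 * Kernel mappings are refused for cached requests while the heap is
 * protected. Otherwise the mapping strategy depends on the heap type:
 * reusable (fmem) heaps remap into the reserved virtual range, CMA
 * heaps vmap() the individual pages, and everything else goes through
 * ioremap()/ioremap_cached().
 */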
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			if (pages) {
				for (i = 0; i < npages; i++) {
					pages[i] = phys_to_page(buf->buffer +
							i * PAGE_SIZE);
				}
				ret_value = vmap(pages, npages, VM_IOREMAP,
						pgprot);
				vfree(pages);
			}
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}

	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

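/*
 * Cache maintenance. When no kernel vaddr is supplied, the buffer is
 * walked in temporary ioremap() windows of at most 1/8th of the
 * vmalloc region, halving the window size whenever the mapping fails.
 * Outer cache maintenance is applied afterwards on the physical range
 * if the platform has an outer cache.
 */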
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base+size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr-1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					"FREE", &last_end, &da,
					data->addr-last_end,
					data->addr-last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end+1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end-1, end-last_end, end-last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->base,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}

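/*
 * Three cases when mapping a buffer into an IOMMU domain: the heap has
 * already been mapped wholesale (reuse the saved iova plus the buffer
 * offset), the heap is configured with iommu_map_all (map the entire
 * heap once, then hand out offsets), or the buffer is mapped
 * individually from its sg_table into a freshly allocated iova range.
 */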
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buf->buffer;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buf->buffer - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buf->buffer - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * clear delayed map flag so that we don't interfere
			 * with this feature (we are already delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			data->mapped_size);

	return;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->iommu_map_all =
				extra_data->iommu_map_all;
		cp_heap->iommu_2x_map_domain =
				extra_data->iommu_2x_map_domain;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = 0;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}