/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"

#include <asm/cacheflush.h>

#include "msm/ion_cp_common.h"
/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @request_region:	function pointer to call when first mapping of memory
 *			occurs.
 * @release_region:	function pointer to call when last mapping of memory
 *			is unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped
 *			in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @iommu_iova:	saved iova when mapping full heap at once.
 * @iommu_partition: partition used to map full heap.
 * @reusable:	indicates if the memory should be reused via fmem.
 * @reserved_vrange: reserved virtual address range for use with fmem.
 * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt:	reference count of outstanding protect requests.
 * @cpu_addr:	kernel address of the heap memory when the heap is CMA backed.
 * @heap_size:	size of the heap memory when the heap is CMA backed.
 * @handle:	DMA address of the heap memory when the heap is CMA backed.
 * @cma:	set to 1 if the heap memory is allocated from CMA.
 * @allow_non_secure_allocation: set to 1 if non-secure allocations are
 *			permitted from this heap.
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

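/*
 * A content protected (CP) heap toggles between HEAP_NOT_PROTECTED and
 * HEAP_PROTECTED through SCM calls issued in ion_cp_protect() and
 * ion_cp_unprotect(). Protection requests are reference counted via
 * protect_cnt: the SCM protect call is made only on the first request and
 * the unprotect call only when the last reference is dropped. While the
 * heap is protected, no new non-secure allocations, user-space mappings,
 * or cached kernel mappings may be created.
 */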
#define DMA_ALLOC_TRIES	5

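/*
 * For CMA backed heaps the underlying memory is allocated lazily: the first
 * allocation triggers allocate_heap_memory(), which grabs the whole heap
 * from the DMA/CMA region (retrying up to DMA_ALLOC_TRIES times) and seeds
 * a gen_pool with it. free_heap_memory() hands the region back once the
 * last buffer has been freed.
 */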
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = 0;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->reusable) {
		ret_value = fmem_set_state(FMEM_C_STATE);
		if (ret_value)
			return 1;
	}

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->reusable)
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is not already protected. Also ensures that
 * we are in the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				/* Heap transition failed; drop our protect
				 * reference and report the failure. */
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if heap is secure heap. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;
	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	unsigned long left_to_unmap = cp_heap->total_size;
	unsigned long page_size = SZ_64K;

	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	if (domain) {
		unsigned long temp_iova = cp_heap->iommu_iova[domain_num];

		while (left_to_unmap) {
			iommu_unmap(domain, temp_iova, page_size);
			temp_iova += page_size;
			left_to_unmap -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			msm_iommu_unmap_extra(domain, temp_iova,
					cp_heap->total_size, SZ_64K);
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
	}
}

void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

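/*
 * The scatterlist is built from equally sized chunks: page sized chunks for
 * cached buffers, 1MB chunks for secure buffers whose size is 1MB aligned,
 * and a single chunk covering the whole buffer otherwise. The memory is
 * physically contiguous, so only the dma address of each entry is filled in.
 */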
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct sg_table *table;
	int ret, i, n_chunks;
	struct scatterlist *sg;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	n_chunks = DIV_ROUND_UP(buffer->size, chunk_size);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret)
		goto err0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = buf->buffer + i * chunk_size;
		sg->length = chunk_size;
		sg->offset = 0;
	}

	return table;
err0:
	kfree(table);
	return ERR_PTR(ret);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = 0;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	unsigned int offset = buf->buffer - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buf->buffer)
		return NULL;

	ret = ioremap_pages(start, buf->buffer, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}

void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
				cp_heap->reserved_vrange, buffer->flags);
		} else if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);

	return;
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}

	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	unsigned int size_to_vmap, total_size;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
		total_size = buffer->size;
		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buf->buffer + offset;
		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", cp_heap->reusable ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base+size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr-1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					"FREE", &last_end, &da,
					data->addr-last_end,
					data->addr-last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end+1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end-1, end-last_end, end-last_end);
		}
	}

	return 0;
}

int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

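/*
 * When iommu_map_all is set, the first IOMMU mapping request maps the whole
 * heap into the target domain with 64K pages (so both the heap size and its
 * base must be 64K aligned). The resulting iova is cached in iommu_iova[]
 * and reused for subsequent buffers; the mapping is only torn down from
 * ion_cp_free() once the heap has no remaining allocations. The designated
 * video domain is mapped at twice the heap size to cover hardware prefetch
 * past the end of the heap.
 */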
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova;

		ret_value = msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K,
						&temp_iova);

		if (ret_value) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			goto out;
		}
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					page_size, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->base,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}

static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	struct ion_cp_buffer *buf = buffer->priv_virt;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buf->buffer;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buf->buffer - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buf->buffer - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			 * Clear delayed map flag so that we don't interfere
			 * with this feature (we are already delaying).
			 */
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align,
						&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}

static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;
	struct ion_cp_heap *cp_heap =
		container_of(data->buffer->heap, struct ion_cp_heap, heap);

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);

	/* If we are mapping everything we'll wait to unmap until everything
	   is freed. */
	if (cp_heap->iommu_iova[domain_num])
		return;

	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			data->mapped_size);

	return;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

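/*
 * Heap creation: the generic fields come from struct ion_platform_heap and
 * the CP specific configuration (secure region, SMI request/release region
 * hooks, fmem reuse, CMA backing, full-heap IOMMU mapping) comes from the
 * optional struct ion_cp_heap_pdata passed in extra_data. Carveout backed
 * heaps get their gen_pool here; CMA backed heaps defer pool creation to
 * the first allocation.
 */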
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->reusable = extra_data->reusable;
		cp_heap->reserved_vrange = extra_data->virt_addr;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->iommu_map_all =
				extra_data->iommu_map_all;
		cp_heap->iommu_2x_map_domain =
				extra_data->iommu_2x_map_domain;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = 0;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
	cp_heap = NULL;
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}
