/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>
#include <asm/cacheflush.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"
#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether the heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @heap_request_region:	function pointer to call when the first
 *			mapping of memory occurs.
 * @heap_release_region:	function pointer to call when the last
 *			mapping of memory is unmapped.
 * @bus_id:	token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped
 *			in kernel space (cached).
 * @kmap_uncached_count:	the total number of times this heap has been
 *			mapped in kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @has_outer_cache:	set to 1 if outer cache is used, 0 otherwise.
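 * @protect_cnt:	number of outstanding protect requests; the heap is
 *			unprotected again when this drops back to zero.
 * @cpu_addr:	kernel address of the backing memory when the heap is
 *			backed by CMA.
 * @heap_size:	size of the heap, used for the CMA allocation.
 * @handle:	DMA handle (physical base) returned by dma_alloc_attrs().
 * @cma:	set to 1 if the heap is backed by CMA, 0 otherwise.
 * @allow_non_secure_allocation:	set to 1 if the heap may also serve
 *			non-secure (unprotected) allocations.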
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

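/**
 * Allocate the backing memory for a CMA-based heap and seed the heap's
 * gen_pool with it. The DMA allocation is retried a few times because
 * CMA can fail transiently. Returns 0 on success and
 * ION_CP_ALLOCATE_FAIL otherwise.
 */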
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
out:
	return ION_CP_ALLOCATE_FAIL;
}

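/**
 * Release the CMA backing memory and tear down the gen_pool. Called
 * once the last allocation has been freed.
 */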
static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* release memory */
	dma_free_coherent(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

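/**
 * Called when the heap transitions from empty to having its first
 * allocation; for CMA-backed heaps this is where the backing memory is
 * actually obtained. Returns 0 on success and 1 on failure.
 */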
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

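/**
 * Counterpart of ion_on_first_alloc(): called when the last allocation
 * is freed so a CMA-backed heap can return its backing memory.
 */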
static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is an unsecured heap. Also ensures that
 * the heap is in the correct FMEM state if it is a reusable heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				/*
				 * Do not report success when the backing
				 * memory could not be obtained.
				 */
				ret_value = -ENOMEM;
				atomic_dec(&cp_heap->protect_cnt);
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is a secured heap. Also ensures that
 * the heap is in the correct FMEM state if it is a reusable heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

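/**
 * Carve @size bytes out of the heap's gen_pool. Fails if a non-secure
 * allocation is requested while the heap is protected, or if the pool
 * is too fragmented to satisfy the request. Returns the physical
 * address of the allocation, or ION_CP_ALLOCATE_FAIL.
 */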
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

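/**
 * Return an allocation made with ion_cp_allocate() to the gen_pool
 * and, if it was the last one, let the heap release its backing
 * memory.
 */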
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
		cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	mutex_unlock(&cp_heap->lock);
}

static int ion_cp_heap_phys(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

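/**
 * Heap op wrapper around ion_cp_allocate(): allocates the per-buffer
 * tracking structure (struct ion_cp_buffer) and stores it in
 * buffer->priv_virt.
 */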
static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	/*
	 * we never want Ion to fault pages in for us with this
	 * heap. We want to set up the mappings ourselves in .map_user
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ION_CP_ALLOCATE_FAIL;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		/* do not leak the tracking structure on failure */
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

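/**
 * Build an sg_table describing the physically contiguous buffer.
 * Cached buffers are chunked at PAGE_SIZE so they can be synced page
 * by page; secure buffers whose size is a multiple of 1MB use 1MB
 * chunks; everything else is described as a single chunk.
 */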
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	return ion_create_chunked_sg_table(buf->buffer, chunk_size,
					buffer->size);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

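/**
 * Map the buffer into kernel space. The mapping is refused while the
 * heap is protected unless the buffer is uncached. CMA-backed heaps
 * have no kernel mapping of their own (DMA_ATTR_NO_KERNEL_MAPPING),
 * so one is built here with vmap(); carveout heaps use
 * ioremap()/ioremap_cached().
 */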
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				/* balance the request_region above */
				ion_cp_release_region(cp_heap);
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							   buffer->size);
			else
				ret_value = ioremap(buf->buffer,
						    buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
							vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

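/**
 * Dump heap statistics and, when a memory map is supplied, a table of
 * the clients holding allocations along with the free gaps between
 * them.
 */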
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			      const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}

	return 0;
}

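/**
 * Secure the whole heap via SCM. Refused while user-space or cached
 * kernel mappings are outstanding, since the CPU must not touch
 * protected memory through such mappings.
 */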
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
			cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

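/**
 * Create a content-protect heap from its platform description. For CMA
 * heaps the gen_pool is created lazily on first allocation; carveout
 * heaps get their pool seeded with the fixed base and size here.
 */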
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = NULL;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

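/*
 * Illustrative sketch only: roughly how a board file might describe a
 * content-protect heap and hand it to the ION platform driver, which
 * in turn calls ion_cp_heap_create(). The field names follow the
 * ion_cp_heap_pdata usage above; the id, name, permission type, base,
 * and size are hypothetical values, not taken from this driver.
 *
 *	static struct ion_cp_heap_pdata cp_mm_pdata = {
 *		.permission_type = IPT_TYPE_MM_CARVEOUT,
 *		.allow_nonsecure_alloc = 1,
 *		.is_cma = 0,
 *	};
 *
 *	static struct ion_platform_heap cp_mm_heap = {
 *		.id = ION_CP_MM_HEAP_ID,
 *		.type = ION_HEAP_TYPE_CP,
 *		.name = "mm_cp",
 *		.base = 0x80000000,
 *		.size = SZ_64M,
 *		.extra_data = (void *)&cp_mm_pdata,
 *	};
 */
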
void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}