/*
 * drivers/gpu/ion/ion_cp_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <trace/events/kmem.h>

#include <asm/mach/map.h>
#include <asm/cacheflush.h>

#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>

#include "ion_priv.h"
#include "msm/ion_cp_common.h"

/**
 * struct ion_cp_heap - container for the heap and shared heap data
 *
 * @heap: the heap information structure
 * @pool: memory pool to allocate from.
 * @base: the base address of the memory pool.
 * @permission_type: Identifier for the memory used by SCM for protecting
 *                   and unprotecting memory.
 * @secure_base: Base address used when securing a heap that is shared.
 * @secure_size: Size used when securing a heap that is shared.
 * @lock: mutex to protect shared access.
 * @heap_protected: Indicates whether heap has been protected or not.
 * @allocated_bytes: the total number of allocated bytes from the pool.
 * @total_size: the total size of the memory pool.
 * @heap_request_region: function pointer to call when first mapping of
 *                       memory occurs.
 * @heap_release_region: function pointer to call when last mapping of
 *                       memory is unmapped.
 * @bus_id: token used with request/release region.
 * @kmap_cached_count: the total number of times this heap has been mapped
 *                     in kernel space (cached).
 * @kmap_uncached_count: the total number of times this heap has been mapped
 *                       in kernel space (un-cached).
 * @umap_count: the total number of times this heap has been mapped in
 *              user space.
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
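 * @protect_cnt: reference count of outstanding protect requests; the SCM
 *               protect call is made when this goes from 0 to 1 and the
 *               unprotect call when it drops back to 0.
 * @cpu_addr: cookie returned by dma_alloc_attrs() for the CMA backing;
 *            non-NULL means the backing is currently allocated.
 * @heap_size: total size of the heap, used when allocating the CMA backing.
 * @handle: DMA (physical) address of the CMA backing; also used as @base.
 * @cma: set to 1 if the heap memory is lazily allocated from CMA.
 * @allow_non_secure_allocation: set to 1 to permit allocations that are
 *               neither secure nor forced contiguous.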
 */
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*heap_request_region)(void *);
	int (*heap_release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
	void *cpu_addr;
	size_t heap_size;
	dma_addr_t handle;
	int cma;
	int allow_non_secure_allocation;
};

enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};

#define DMA_ALLOC_TRIES	5

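/*
 * Lazily allocate the heap's backing memory from CMA through the DMA API,
 * retrying a few times since CMA allocations can fail transiently under
 * memory pressure. On success the region is handed to a gen_pool allocator
 * (4KB minimum allocation order). DMA_ATTR_NO_KERNEL_MAPPING is used
 * because this heap sets up kernel mappings on demand instead.
 */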
static int allocate_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret;
	int tries = 0;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (cp_heap->cpu_addr)
		return 0;

	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
		cp_heap->cpu_addr = dma_alloc_attrs(dev,
						cp_heap->heap_size,
						&(cp_heap->handle),
						0,
						&attrs);
		if (!cp_heap->cpu_addr) {
			trace_ion_cp_alloc_retry(tries);
			msleep(20);
		}
	}

	if (!cp_heap->cpu_addr)
		goto out;

	cp_heap->base = cp_heap->handle;

	cp_heap->pool = gen_pool_create(12, -1);
	if (!cp_heap->pool)
		goto out_free;

	ret = gen_pool_add(cp_heap->pool, cp_heap->base,
			cp_heap->heap_size, -1);
	if (ret < 0)
		goto out_pool;

	return 0;

out_pool:
	gen_pool_destroy(cp_heap->pool);
out_free:
	/* free with the same attrs the memory was allocated with */
	dma_free_attrs(dev, cp_heap->heap_size, cp_heap->cpu_addr,
				cp_heap->handle, &attrs);
out:
	return ION_CP_ALLOCATE_FAIL;
}

static void free_heap_memory(struct ion_heap *heap)
{
	struct device *dev = heap->priv;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* release the CMA backing with the attrs it was allocated with */
	dma_free_attrs(dev, cp_heap->heap_size, cp_heap->cpu_addr,
			cp_heap->handle, &attrs);
	gen_pool_destroy(cp_heap->pool);
	cp_heap->pool = NULL;
	cp_heap->cpu_addr = NULL;
}

/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 */
static unsigned long ion_cp_get_total_kmap_count(
					const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}

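/*
 * First-alloc/last-free hooks: for CMA-backed heaps the backing memory is
 * claimed from CMA only when the first buffer is allocated (or the heap is
 * first protected) and released again once the heap is empty.
 * ion_on_first_alloc() returns 0 on success, 1 on failure.
 */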
static int ion_on_first_alloc(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value;

	if (cp_heap->cma) {
		ret_value = allocate_heap_memory(heap);
		if (ret_value)
			return 1;
	}
	return 0;
}

static void ion_on_last_free(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (cp_heap->cma)
		free_heap_memory(heap);
}

/**
 * Protects memory if the heap is an unsecured heap.
 * Must be called with heap->lock locked.
 */
static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (!cp_heap->allocated_bytes)
			if (ion_on_first_alloc(heap)) {
				/* undo the count taken above and fail */
				atomic_dec(&cp_heap->protect_cnt);
				ret_value = -ENOMEM;
				goto out;
			}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type,
				version, data);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - error code: %d\n",
				heap->name, ret_value);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);

			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}

/**
 * Unprotects memory if the heap is a secure heap.
 * Must be called with heap->lock locked.
 */
static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type, version, data);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - error code: %d\n",
				heap->name, error_code);
		} else {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%pa\n",
				heap->name, &cp_heap->base);

			if (!cp_heap->allocated_bytes)
				ion_on_last_free(heap);
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}

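/*
 * Carve an allocation out of the heap's gen_pool. Non-secure allocations
 * are refused while the heap is protected, and refused entirely (unless
 * forced contiguous) when allow_non_secure_allocation is clear. The first
 * allocation transitions a CMA-backed heap by claiming its backing memory.
 * Returns the physical address on success, or a failure sentinel
 * (ION_CP_ALLOCATE_FAIL / ION_RESERVED_ALLOCATE_FAIL).
 */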
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
	unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected heap %s\n",
			heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (!force_contig && !secure_allocation &&
	     !cp_heap->allow_non_secure_allocation) {
		mutex_unlock(&cp_heap->lock);
		pr_debug("%s: non-secure allocation disallowed from this heap\n",
			__func__);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * The check above already checked for non-secure allocations when the
	 * heap is protected. HEAP_PROTECTED implies that this must be a secure
	 * allocation. If the heap is protected and there are userspace or
	 * cached kernel mappings, something has gone wrong in the security
	 * model.
	 */
	if (cp_heap->heap_protected == HEAP_PROTECTED) {
		BUG_ON(cp_heap->umap_count != 0);
		BUG_ON(cp_heap->kmap_cached_count != 0);
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (!cp_heap->allocated_bytes)
		if (ion_on_first_alloc(heap)) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		mutex_lock(&cp_heap->lock);
		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);
		if (!cp_heap->allocated_bytes &&
			cp_heap->heap_protected == HEAP_NOT_PROTECTED)
			ion_on_last_free(heap);
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}

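/*
 * Return an allocation to the gen_pool. When the last byte is freed and
 * the heap is not protected, the CMA backing (if any) is released.
 */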
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (!cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED)
		ion_on_last_free(heap);

	mutex_unlock(&cp_heap->lock);
}

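/* Report the physical address and size backing @buffer. */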
static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	struct ion_cp_buffer *buf = buffer->priv_virt;

	*addr = buf->buffer;
	*len = buffer->size;
	return 0;
}

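/*
 * Heap-ops allocate: wraps ion_cp_allocate() and attaches a struct
 * ion_cp_buffer as the buffer's private data to track the physical
 * address, secure state, and mapping counts.
 */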
static int ion_cp_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	struct ion_cp_buffer *buf;
	phys_addr_t addr;

	/*
	 * we never want Ion to fault pages in for us with this
	 * heap. We want to set up the mappings ourselves in .map_user
	 */
	flags |= ION_FLAG_CACHED_NEEDS_SYNC;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	addr = ion_cp_allocate(heap, size, align, flags);
	if (addr == ION_CP_ALLOCATE_FAIL) {
		kfree(buf);
		return -ENOMEM;
	}

	buf->buffer = addr;
	buf->want_delayed_unsecure = 0;
	atomic_set(&buf->secure_cnt, 0);
	mutex_init(&buf->lock);
	buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
	buffer->priv_virt = buf;

	return 0;
}

static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	ion_cp_free(heap, buf->buffer, buffer->size);
	WARN_ON(atomic_read(&buf->secure_cnt));
	WARN_ON(atomic_read(&buf->map_cnt));
	kfree(buf);

	buffer->priv_virt = NULL;
}

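/*
 * Build the sg_table for this physically contiguous buffer. The chunk
 * size is PAGE_SIZE for cached buffers and 1MB for secure buffers whose
 * size is 1MB-aligned (presumably so the IOMMU can use 1MB mappings);
 * otherwise a single chunk covers the whole buffer.
 */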
struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;
	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
		chunk_size = SZ_1M;

	return ion_create_chunked_sg_table(buf->buffer, chunk_size,
					buffer->size);
}

struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sg_table(buffer);
}

void ion_cp_heap_unmap_dma(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

/**
 * Call request region for SMI memory if this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_request_region)
			ret_value = cp_heap->heap_request_region(
					cp_heap->bus_id);
	return ret_value;
}

/**
 * Call release region for SMI memory if this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int ret_value = 0;
	if ((cp_heap->umap_count + ion_cp_get_total_kmap_count(cp_heap)) == 0)
		if (cp_heap->heap_release_region)
			ret_value = cp_heap->heap_release_region(
					cp_heap->bus_id);
	return ret_value;
}

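/*
 * Map a buffer into kernel space. Cached mappings are refused while the
 * heap is protected. CMA-backed heaps are mapped with vmap() over a page
 * array; other heaps use ioremap()/ioremap_cached(). The region request
 * hook runs on the first mapping of the heap.
 */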
void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(buffer->flags))) {

		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->cma) {
			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
			struct page **pages = vmalloc(
						sizeof(struct page *) * npages);
			int i;
			pgprot_t pgprot;

			if (!pages) {
				/* undo the region request taken above */
				ion_cp_release_region(cp_heap);
				mutex_unlock(&cp_heap->lock);
				return ERR_PTR(-ENOMEM);
			}

			if (ION_IS_CACHED(buffer->flags))
				pgprot = PAGE_KERNEL;
			else
				pgprot = pgprot_writecombine(PAGE_KERNEL);

			for (i = 0; i < npages; i++) {
				pages[i] = phys_to_page(buf->buffer +
						i * PAGE_SIZE);
			}
			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
			vfree(pages);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				ret_value = ioremap_cached(buf->buffer,
							buffer->size);
			else
				ret_value = ioremap(buf->buffer,
							buffer->size);
		}

		if (!ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
			atomic_inc(&buf->map_cnt);
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	if (cp_heap->cma)
		vunmap(buffer->vaddr);
	else
		__arm_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		--cp_heap->kmap_cached_count;
	else
		--cp_heap->kmap_uncached_count;

	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

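/*
 * Map a buffer into a user address space with remap_pfn_range(). Secure
 * buffers and protected heaps are never mapped to userspace; uncached
 * buffers get a write-combined page protection.
 */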
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(buffer->flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value) {
			ion_cp_release_region(cp_heap);
		} else {
			atomic_inc(&buf->map_cnt);
			++cp_heap->umap_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	struct ion_cp_buffer *buf = buffer->priv_virt;

	mutex_lock(&cp_heap->lock);
	--cp_heap->umap_count;
	atomic_dec(&buf->map_cnt);
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}

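/*
 * debugfs helper: dump allocation totals, mapping counts, protection
 * state, and (when a memory map is supplied) a per-client listing of
 * used and free ranges within the heap.
 */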
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s,
			const struct rb_root *mem_map)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");

	if (mem_map) {
		unsigned long base = cp_heap->base;
		unsigned long size = cp_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}

	return 0;
}

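/*
 * Secure (protect) the whole heap via SCM. Refused while there are
 * outstanding userspace or cached kernel mappings, since those would
 * allow non-secure access to protected memory.
 */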
int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
		ret_value = ion_cp_protect(heap, version, data);
	} else {
		pr_err("ION cannot secure heap with outstanding mappings: User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		ret_value = -EINVAL;
	}

	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
	int ret_value = 0;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap, version, data);
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}

static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.secure_buffer = ion_cp_secure_buffer,
	.unsecure_buffer = ion_cp_unsecure_buffer,
};

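/*
 * Create a content-protection heap from platform data. For CMA-backed
 * heaps the gen_pool is created lazily on first allocation; otherwise it
 * is created here over the fixed [base, base + size) range. Extra
 * platform data supplies the SCM permission type, an optional separate
 * secure region, and the request/release region hooks.
 */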
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_cp_heap *cp_heap;
	int ret;

	cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
	if (!cp_heap)
		return ERR_PTR(-ENOMEM);

	mutex_init(&cp_heap->lock);

	cp_heap->allocated_bytes = 0;
	cp_heap->umap_count = 0;
	cp_heap->kmap_cached_count = 0;
	cp_heap->kmap_uncached_count = 0;
	cp_heap->total_size = heap_data->size;
	cp_heap->heap.ops = &cp_heap_ops;
	cp_heap->heap.type = (enum ion_heap_type) ION_HEAP_TYPE_CP;
	cp_heap->heap_protected = HEAP_NOT_PROTECTED;
	cp_heap->secure_base = heap_data->base;
	cp_heap->secure_size = heap_data->size;
	cp_heap->has_outer_cache = heap_data->has_outer_cache;
	cp_heap->heap_size = heap_data->size;

	atomic_set(&cp_heap->protect_cnt, 0);
	if (heap_data->extra_data) {
		struct ion_cp_heap_pdata *extra_data =
				heap_data->extra_data;
		cp_heap->permission_type = extra_data->permission_type;
		if (extra_data->secure_size) {
			cp_heap->secure_base = extra_data->secure_base;
			cp_heap->secure_size = extra_data->secure_size;
		}
		if (extra_data->setup_region)
			cp_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			cp_heap->heap_request_region =
				extra_data->request_region;
		if (extra_data->release_region)
			cp_heap->heap_release_region =
				extra_data->release_region;
		cp_heap->cma = extra_data->is_cma;
		cp_heap->allow_non_secure_allocation =
			extra_data->allow_nonsecure_alloc;
	}

	if (cp_heap->cma) {
		cp_heap->pool = NULL;
		cp_heap->cpu_addr = NULL;
		cp_heap->heap.priv = heap_data->priv;
	} else {
		cp_heap->pool = gen_pool_create(12, -1);
		if (!cp_heap->pool)
			goto free_heap;

		cp_heap->base = heap_data->base;
		ret = gen_pool_add(cp_heap->pool, cp_heap->base,
					heap_data->size, -1);
		if (ret < 0)
			goto destroy_pool;
	}
	return &cp_heap->heap;

destroy_pool:
	gen_pool_destroy(cp_heap->pool);

free_heap:
	kfree(cp_heap);

	return ERR_PTR(-ENOMEM);
}

void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
}

void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
		unsigned long *size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	*base = cp_heap->base;
	*size = cp_heap->total_size;
}