/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

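/*
 * Allocation orders tried by this heap, largest first.  Each order is
 * backed by its own ion_page_pool so that freed uncached allocations can
 * be recycled without going back to the buddy allocator.
 */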
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

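/*
 * ion_system_heap wraps the generic ion_heap and keeps one page pool per
 * entry in orders[]; page_info tracks a single chunk (a page of the given
 * order) while an allocation's chunk list is being assembled.
 */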
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

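/*
 * Allocate one chunk of the given order.  Uncached buffers are served from
 * the matching page pool; cached buffers go straight to alloc_pages(),
 * since the pools only hold pages intended for noncached use.  When the
 * buffer will be faulted into userspace page by page, the high-order page
 * is split into order-0 pages.
 */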
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached)
		page = ion_page_pool_alloc(pool);
	else
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, order);
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

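/*
 * Return one chunk to the system.  Uncached chunks are scrubbed through a
 * temporary writecombine mapping and handed back to their pool; cached
 * chunks are released to the page allocator, page by page if they were
 * previously split.
 */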
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot so
		 * the writes occur to noncached mappings, as the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

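/*
 * Pick the largest order chunk that still fits in both the remaining size
 * and max_order.  Because the allocation loop below passes the previous
 * chunk's order back in as max_order, chunk sizes only ever shrink, so a
 * buffer is built from largest to smallest chunks.  For example (assuming
 * 4 KB pages), a 1.5 MB request is typically satisfied by one order-8
 * chunk (1 MB) followed by eight order-4 chunks (64 KB each).
 */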
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

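/*
 * Build the buffer as a list of variable-order chunks, then describe it
 * with an sg_table: one scatterlist entry per chunk, or one per page when
 * the buffer is set up for per-page userspace faulting.
 */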
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

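/*
 * Tear down a buffer: every scatterlist entry is one chunk, so its length
 * gives back the order it was allocated with.
 */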
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

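/*
 * The sg_table built at allocation time already describes the buffer, so
 * DMA mapping is just handing it out; there is nothing to undo on unmap.
 */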
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

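/*
 * Map the whole buffer into the kernel by flattening the scatterlist into
 * a page array and vmap()ing it, cached or writecombine to match the
 * buffer flags.
 */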
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

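/*
 * Map the buffer into userspace chunk by chunk with remap_pfn_range(),
 * honouring vm_pgoff as an offset in scatterlist entries.
 */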
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

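/* Report how many pages each pool is currently holding via debugfs. */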
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u pages in pool = %lu total\n",
			   pool->count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->count);
	}
	return 0;
}

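/*
 * Create the system heap and one page pool per supported order; on any
 * failure everything allocated so far is unwound.
 */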
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;

		pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY,
					    orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

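/*
 * The "contig" variant below is a simple kmalloc-backed heap for callers
 * that need physically contiguous memory; it reuses the kernel map/unmap
 * helpers above.
 */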
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}