/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

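/*
 * The system heap builds each buffer out of power-of-two chunks, trying
 * the sizes in orders[] from largest to smallest.  Uncached chunks are
 * recycled through per-order page pools; cached chunks go straight to
 * the page allocator.
 */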
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					   __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

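/*
 * Map an allocation order to its index in orders[] and pools[].  Any
 * order not listed in orders[] is a driver bug, hence the BUG().
 */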
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

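/*
 * Bookkeeping for one contiguous chunk of a buffer while its sg_table
 * is being assembled.
 */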
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

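/*
 * Allocate one chunk of 2^order pages.  Uncached chunks come from the
 * per-order pool; cached chunks are allocated directly and then synced
 * for device so no dirty cache lines cover the new buffer.  If the
 * buffer will be faulted into userspace page by page, the higher-order
 * page is split so each 0-order page can be mapped and freed on its own.
 */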
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		struct scatterlist sg;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page, PAGE_SIZE << order, 0);
		dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

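/*
 * Return one chunk to the system.  Uncached chunks are zeroed through a
 * temporary writecombined vmap and recycled via the pool; cached chunks
 * go back to the page allocator, page by page if the chunk was split.
 */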
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap because we want to set the pgprot
		 * so that the writes go to noncached mappings; the pool's
		 * purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

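/*
 * Allocate the largest chunk that fits: walk orders[] from largest to
 * smallest, skip any order bigger than the remaining size or max_order,
 * and return the first chunk the allocator can satisfy.
 */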
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

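/*
 * Build the buffer greedily out of the largest chunks available.  Each
 * successful chunk caps max_order for the rest of the loop, so an order
 * that has already failed is never retried for this buffer.
 */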
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

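/*
 * Each scatterlist entry holds exactly one chunk allocated above; hand
 * each chunk back to free_buffer_page() at the order recovered from the
 * entry's length.
 */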
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

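/*
 * The sg_table was built at allocation time and stashed in priv_virt,
 * so map_dma simply hands it back; unmap_dma is a no-op because the
 * table lives until the buffer is freed.
 */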
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long)atomic_read(&system_heap_allocated));

	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.print_debug = ion_system_print_debug,
};

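/*
 * Create the system heap and one page pool per order in orders[].
 * Pools above order 4 use the fail-fast high-order gfp flags.
 */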
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

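/*
 * The system contig heap below is backed by kzalloc(), so every buffer
 * it hands out is physically contiguous and already kernel mapped.
 */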
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags)) {
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	} else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long)atomic_read(&system_contig_heap_allocated));

	return 0;
}

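/*
 * A contig buffer is kmalloc'd, so it already has a kernel virtual
 * address: map_kernel just returns it and unmap_kernel has nothing
 * to undo.
 */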
void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.print_debug = ion_system_contig_print_debug,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}