/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

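/*
 * Allocation orders tried for each buffer, largest first.  One
 * ion_page_pool is kept per order so uncached pages freed back to the
 * heap can be recycled without going through the page allocator again.
 */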
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

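/*
 * Allocate one chunk of 2^order pages.  Uncached buffers are served
 * from the per-order page pool; cached buffers bypass the pool and come
 * straight from the page allocator.  If the buffer will be faulted into
 * userspace page by page, the high-order page is split into order-0
 * pages.
 */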
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached)
		page = ion_page_pool_alloc(pool);
	else
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, order);
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

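/*
 * Return one chunk of 2^order pages.  Uncached pages are zeroed through
 * a temporary writecombine mapping and handed back to the pool; cached
 * pages go back to the page allocator, individually if the chunk was
 * split for fault handling.
 */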
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot
		 * so that the writes occur through noncached mappings, as
		 * the pool's purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

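/*
 * Pick the largest order from orders[] that still fits in the remaining
 * size and does not exceed max_order, and try to allocate it, falling
 * back to smaller orders when an allocation fails.
 */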
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

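/*
 * Allocate the buffer as a list of variable-order chunks and then
 * describe them in an sg_table.  max_order is lowered to the order of
 * the last chunk, so chunk sizes never increase across the buffer.  For
 * buffers that will be faulted in page by page the table gets one entry
 * per page, otherwise one entry per chunk.
 */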
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

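/*
 * Walk the buffer's sg_table and return each chunk with
 * free_buffer_page(), recovering the chunk order from the segment
 * length.
 */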
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

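/*
 * Build a page array from the sg_table and map the whole buffer into
 * the kernel with vmap(); cached buffers use PAGE_KERNEL, everything
 * else a writecombine pgprot.
 */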
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

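/*
 * Map the buffer into the vma one scatterlist segment at a time with
 * remap_pfn_range(), skipping the first vm_pgoff segments.  Only cached
 * mappings are supported by this heap.
 */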
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.print_debug = ion_system_print_debug,
};

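/*
 * Create the system heap: one ion_page_pool per entry in orders[],
 * using the same gfp flags as the direct (cached) allocation path.
 *
 * A minimal usage sketch (the ion_platform_heap argument is unused
 * here, so a hypothetical caller could pass NULL):
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);
 *	...
 *	ion_system_heap_destroy(heap);
 */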
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;

		pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY,
					    orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

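/*
 * The "contig" heap below backs each buffer with a single kzalloc()
 * allocation, so buffers are physically contiguous but limited to sizes
 * the slab allocator can satisfy.
 */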
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

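/*
 * A kzalloc() buffer is physically contiguous, so its sg_table is a
 * single entry spanning the whole buffer.
 */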
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

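/*
 * Userspace mapping of the contiguous buffer is a single
 * remap_pfn_range() over the backing pages; as with the system heap,
 * only cached mappings are allowed.
 */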
int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags)) {
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	} else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.print_debug = ion_system_contig_print_debug,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}