/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

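/*
 * GFP flags for the two allocation classes.  High-order allocations are
 * opportunistic: they must not block, retry, or wake kswapd, because the
 * allocator below simply falls back to a smaller order when they fail.
 * Low-order (single page) allocations may block and reclaim as usual.
 * Orders 8, 4 and 0 correspond to 1MB, 64KB and 4KB chunks with 4K pages.
 */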
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

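/*
 * Allocate one chunk of 2^order pages.  Uncached buffers are served from
 * the per-order page pool (pool pages are zeroed and kept out of the CPU
 * cache); cached buffers are allocated directly from the page allocator.
 * When userspace mappings are faulted in page by page, the compound
 * allocation is split so individual pages can be freed independently.
 */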
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
	}
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

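/*
 * Return a chunk to its pool (uncached) or to the page allocator (cached).
 * Pages going back to the pool are scrubbed first so stale data can never
 * leak into a future allocation.
 */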
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/*
		 * Zero the pages before returning them to the pool for
		 * security.  This uses vmap as we want to set the pgprot
		 * so that the writes occur to noncached mappings, as the
		 * pool's purpose is to keep the pages out of the cache.
		 */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			if (addr) {
				memset(addr, 0, PAGE_SIZE);
				vunmap(addr);
			}
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

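/*
 * Grab the largest chunk that still fits in the remaining size, walking
 * the orders from largest to smallest.  max_order caps the search at the
 * last order that succeeded, so a failed high-order allocation is never
 * retried while satisfying the same buffer.
 */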
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

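/*
 * Build a buffer out of mixed-order chunks.  The scatterlist gets one
 * entry per chunk, except when userspace mappings are faulted in page by
 * page, in which case every page gets its own entry.
 */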
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

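/*
 * Map the whole (physically discontiguous) buffer into a contiguous
 * kernel virtual range: gather every page into a temporary array, then
 * vmap it with a pgprot matching the buffer's cache setting.
 */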
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

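/*
 * Map the buffer into userspace one scatterlist entry at a time.  Note
 * that the mapping offset (vm_pgoff) is consumed in units of whole sg
 * entries before any remapping begins.
 */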
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.print_debug = ion_system_print_debug,
};

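/*
 * Create the system heap along with one page pool per supported order.
 * Pools for orders above 4 use the non-blocking high-order GFP flags;
 * everything is torn down again on any failure.
 */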
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

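/*
 * The system contig heap is a thin wrapper around kzalloc/kfree: buffers
 * are physically contiguous kernel memory, so phys and map_kernel can
 * resolve them directly from the kernel virtual address.
 */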
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

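/*
 * A contiguous buffer needs only a single-entry sg_table; it is built on
 * demand here and freed again in unmap_dma.
 */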
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.print_debug = ion_system_contig_print_debug,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}