/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

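/*
 * The system heap builds buffers out of discontiguous pages.  Allocations
 * are attempted at the orders listed in orders[] (largest first) so that a
 * buffer needs as few scatterlist entries as possible, and uncached pages
 * are recycled through per-order page pools.  The high-order gfp flags are
 * relaxed (__GFP_NORETRY, no kswapd wakeup) so that failed large
 * allocations fall back to smaller orders quickly.
 */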
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN | __GFP_NORETRY |
                                            __GFP_NO_KSWAPD);
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool **pools;
};

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

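/*
 * Allocate one block of 2^order pages for @buffer.  Uncached buffers are
 * served from the per-order page pool; cached buffers bypass the pools and
 * call alloc_pages() directly, since the pools only hold pages that are
 * kept out of the CPU cache.  If the buffer is set up to fault pages into
 * userspace individually, the high-order block is split into single pages.
 */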
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags, order);
        }
        if (!page)
                return NULL;
        if (split_pages)
                split_page(page, order);
        return page;
}

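/*
 * Return one block of 2^order pages.  Uncached pages are zeroed through a
 * temporary writecombined mapping and handed back to the page pool;
 * everything else is freed to the page allocator.
 */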
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page,
                             unsigned int order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        int i;

        if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];

                /*
                 * Zero the pages before returning them to the pool for
                 * security.  vmap is used with a writecombined pgprot so
                 * that the writes go to noncached mappings, as the pool's
                 * purpose is to keep its pages out of the cache.
                 */
                for (i = 0; i < (1 << order); i++) {
                        struct page *sub_page = page + i;
                        void *addr = vmap(&sub_page, 1, VM_MAP,
                                          pgprot_writecombine(PAGE_KERNEL));
                        memset(addr, 0, PAGE_SIZE);
                        vunmap(addr);
                }
                ion_page_pool_free(pool, page);
        } else if (split_pages) {
                for (i = 0; i < (1 << order); i++)
                        __free_page(page + i);
        } else {
                __free_pages(page, order);
        }
}

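/*
 * Pick the largest order block (no bigger than @max_order) that still fits
 * in the remaining @size and allocate it.  Returns a page_info describing
 * the block, or NULL if nothing could be allocated.
 */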
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
                                                 struct ion_buffer *buffer,
                                                 unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                if (!info) {
                        free_buffer_page(heap, buffer, page, orders[i]);
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

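/*
 * Allocate @size bytes as a list of page blocks, largest orders first, then
 * publish them to the rest of ion as an sg_table in buffer->priv_virt.
 * max_order only ever shrinks, so once the allocator has fallen back to a
 * smaller order it does not retry larger ones for this buffer.
 */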
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];
        bool split_pages = ion_buffer_fault_user_mappings(buffer);

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(sys_heap, buffer,
                                               size_remaining, max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                               DMA_BIDIRECTIONAL);

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                free_buffer_page(sys_heap, buffer, info->page, info->order);
                kfree(info);
        }
        return -ENOMEM;
}

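/*
 * Free every block referenced by the buffer's sg_table, deriving each
 * block's order from its scatterlist length, then release the table itself.
 */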
void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->priv_virt;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg),
                                 get_order(sg_dma_len(sg)));
        sg_free_table(table);
        kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

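/*
 * Map the whole buffer into the kernel with vmap().  A flat page array is
 * built from the sg_table first; the mapping is cacheable only if the
 * buffer was allocated with ION_FLAG_CACHED.
 */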
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->priv_virt;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

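/*
 * Map the buffer into a userspace vma with remap_pfn_range(), one
 * scatterlist entry at a time.  vm_pgoff is treated as a count of
 * scatterlist entries to skip, and mapping stops once the vma is full.
 */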
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i) {
                if (offset) {
                        offset--;
                        continue;
                }
                remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
                                sg_dma_len(sg), vma->vm_page_prot);
                addr += sg_dma_len(sg);
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
};

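/*
 * debugfs hook: report how many pages each per-order pool is currently
 * holding, split into highmem and lowmem counts.
 */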
static int ion_system_heap_debug_show(struct ion_heap *heap,
                                      struct seq_file *s, void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);
        }
        return 0;
}

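/*
 * Create the system heap: one ion_page_pool per entry in orders[], with the
 * relaxed gfp flags for the high orders.  Pools created so far are torn
 * down again if a later one fails.
 */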
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)
                goto err_alloc_pools;
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                heap->pools[i] = pool;
        }
        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;
err_create_pool:
        for (i = 0; i < num_orders; i++)
                if (heap->pools[i])
                        ion_page_pool_destroy(heap->pools[i]);
        kfree(heap->pools);
err_alloc_pools:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap->pools);
        kfree(sys_heap);
}

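/*
 * The system contig heap below backs ION_HEAP_TYPE_SYSTEM_CONTIG: each
 * buffer is a single physically contiguous kzalloc() allocation, which is
 * why it can also report a physical address through the phys op.
 */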
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}