/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

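/*
 * Running totals reported through the heaps' print_debug hooks, and the GFP
 * masks used when pulling pages from the buddy allocator.  High-order
 * allocations are opportunistic: they must not sleep, retry, or wake kswapd,
 * since the allocator can always fall back to a smaller order (see orders[]
 * below, tried largest first).
 */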
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN | __GFP_NORETRY |
                                            __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                           __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool **pools;
};

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

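/*
 * Allocate one physically contiguous chunk of 2^order pages.  Uncached
 * buffers are served from the per-order page pools; cached buffers are
 * allocated directly from the buddy allocator and flushed for the device
 * with dma_sync_sg_for_device().  If the buffer will be faulted into
 * userspace page by page, the chunk is split into order-0 pages so each
 * page can be mapped and freed individually.
 */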
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                struct scatterlist sg;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags, order);
                if (!page)
                        return NULL;
                sg_init_table(&sg, 1);
                sg_set_page(&sg, page, PAGE_SIZE << order, 0);
                dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
        }
        if (!page)
                return NULL;

        if (split_pages)
                split_page(page, order);
        return page;
}

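/*
 * Return one chunk of 2^order pages.  Uncached chunks are zeroed through a
 * temporary write-combined kernel mapping (so the pool never holds stale
 * data and the zeroing bypasses the CPU cache) and then handed back to the
 * page pool; cached chunks go straight back to the buddy allocator.
 */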
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page,
                             unsigned int order, struct vm_struct *vm_struct)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        int i;

        if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];
                /*
                 * Zero the pages before returning them to the pool, for
                 * security.  This uses vmap so the pgprot can be set and the
                 * writes go to a noncached mapping, since the pool's purpose
                 * is to keep the pages out of the cache.
                 */
                for (i = 0; i < (1 << order); i++) {
                        struct page *sub_page = page + i;
                        struct page **pages = &sub_page;

                        map_vm_area(vm_struct,
                                    pgprot_writecombine(PAGE_KERNEL),
                                    &pages);
                        memset(vm_struct->addr, 0, PAGE_SIZE);
                        unmap_kernel_range((unsigned long)vm_struct->addr,
                                           PAGE_SIZE);
                }
                ion_page_pool_free(pool, page);
        } else if (split_pages) {
                for (i = 0; i < (1 << order); i++)
                        __free_page(page + i);
        } else {
                __free_pages(page, order);
        }
}

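/*
 * Pick the largest order, no bigger than max_order, that still fits in the
 * remaining size and allocate a chunk of it.  Returns NULL once nothing can
 * be allocated at any order.
 */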
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
                                                 struct ion_buffer *buffer,
                                                 unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

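/*
 * System heap allocation: grab the largest chunks available until the
 * requested size is covered, then describe the result with an sg_table.
 * max_order only ever decreases, so once an order has failed it is not
 * retried for this buffer.  For buffers that fault pages into userspace the
 * table gets one entry per page; otherwise one entry per chunk.
 */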
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct vm_struct *vm_struct;
        pte_t *ptes;

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(sys_heap, buffer,
                                               size_remaining, max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;
                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        buffer->priv_virt = table;
        atomic_add(size, &system_heap_allocated);
        return 0;
err1:
        kfree(table);
err:
        vm_struct = get_vm_area(PAGE_SIZE, &ptes);
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                free_buffer_page(sys_heap, buffer, info->page, info->order,
                                 vm_struct);
                kfree(info);
        }
        free_vm_area(vm_struct);
        return -ENOMEM;
}

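/*
 * Free every chunk described by the buffer's sg_table.  A single scratch
 * vm_struct is reserved up front and reused by free_buffer_page() to zero
 * uncached pages before they are returned to the pools.
 */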
void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->sg_table;
        struct scatterlist *sg;
        struct vm_struct *vm_struct;
        pte_t *ptes;
        int i;

        vm_struct = get_vm_area(PAGE_SIZE, &ptes);

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg),
                                 get_order(sg_dma_len(sg)), vm_struct);
        free_vm_area(vm_struct);
        sg_free_table(table);
        kfree(table);
        atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
                                  const struct rb_root *unused)
{
        seq_printf(s, "total bytes currently allocated: %lx\n",
                   (unsigned long) atomic_read(&system_heap_allocated));

        return 0;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_heap_map_user,
        .print_debug = ion_system_print_debug,
};

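/*
 * Create the system heap and one page pool per entry in orders[].  Pools
 * for orders above 4 use the nonblocking high-order GFP mask.
 */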
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)
                goto err_alloc_pools;
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                heap->pools[i] = pool;
        }
        return &heap->heap;
err_create_pool:
        for (i = 0; i < num_orders; i++)
                if (heap->pools[i])
                        ion_page_pool_destroy(heap->pools[i]);
        kfree(heap->pools);
err_alloc_pools:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap->pools);
        kfree(sys_heap);
}

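/*
 * The "system contig" heap below backs each buffer with a single kzalloc()
 * allocation, so the memory is physically contiguous and limited to sizes
 * kmalloc can satisfy.
 */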
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        atomic_add(len, &system_contig_heap_allocated);
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
        atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

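/*
 * Map a contig buffer into userspace.  Only cached mappings are supported;
 * requests for an uncached mapping of this kmalloc-backed memory are
 * rejected with -EINVAL.
 */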
int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        if (ION_IS_CACHED(buffer->flags)) {
                return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        } else {
                pr_err("%s: cannot map system heap uncached\n", __func__);
                return -EINVAL;
        }
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
                                         struct seq_file *s,
                                         const struct rb_root *unused)
{
        seq_printf(s, "total bytes currently allocated: %lx\n",
                   (unsigned long) atomic_read(&system_contig_heap_allocated));

        return 0;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_heap_map_kernel,
        .unmap_kernel = ion_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
        .print_debug = ion_system_contig_print_debug,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}