/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>

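/*
 * GFP flags and the page orders this heap will try, largest first.
 * High-order attempts are opportunistic: they skip kswapd and give up
 * immediately (__GFP_NORETRY, ~__GFP_WAIT) so a failed order-8 or
 * order-4 allocation simply falls back to the next smaller order.
 */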
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					   __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

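/*
 * Allocate one chunk of 2^order pages for @buffer. Uncached buffers are
 * served from the per-order page pools; cached buffers come straight from
 * the page allocator and are synced for the device before use. Buffers
 * that fault in their user mappings get the compound page split so it can
 * be mapped (and later freed) page by page.
 */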
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		struct scatterlist sg;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		trace_alloc_pages_sys_start(gfp_flags, order);
		page = alloc_pages(gfp_flags, order);
		trace_alloc_pages_sys_end(gfp_flags, order);
		if (!page) {
			trace_alloc_pages_sys_fail(gfp_flags, order);
			return NULL;
		}
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page, PAGE_SIZE << order, 0);
		dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;

	if (split_pages)
		split_page(page, order);
	return page;
}

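/*
 * Give one chunk back: uncached chunks return to their pool, split
 * chunks are freed page by page, everything else goes back to the page
 * allocator whole.
 */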
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

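/*
 * Allocate the largest chunk that fits in @size without exceeding
 * @max_order. Callers feed the order of the previous chunk back in as
 * max_order, so the order sequence only ever decreases and a failed
 * high-order allocation is never retried.
 */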
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

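/*
 * Satisfy the request with the largest chunks available, then publish
 * the result as an sg_table with one entry per chunk (or one entry per
 * page when the buffer will be faulted into userspace page by page).
 */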
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;
		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

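/*
 * Since the heap is created with ION_HEAP_FLAG_DEFER_FREE, this is
 * normally reached from the ION core's deferred-free path rather than
 * directly from the client's release.
 */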
void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	LIST_HEAD(pages);
	int i;

	/* uncached pages come from the page pools, zero them before returning
	   for security purposes (other allocations are zeroed at alloc time) */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

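/*
 * Create the system heap and one page pool per entry in orders[].
 * The pools start out empty and are refilled as uncached buffers are
 * freed back to them.
 *
 * Usage sketch (illustrative only, not part of this file): a platform
 * would typically wire the heap up roughly as follows, where "idev" is
 * assumed to be a struct ion_device obtained from ion_device_create():
 *
 *	struct ion_platform_heap pheap = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.name = "system",
 *	};
 *	struct ion_heap *heap = ion_system_heap_create(&pheap);
 *
 *	if (!IS_ERR_OR_NULL(heap))
 *		ion_device_add_heap(idev, heap);
 */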
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

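/*
 * Tear down what ion_system_heap_create() set up: the pools first, then
 * the heap itself. All buffers are assumed to have been freed already.
 */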
void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

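/*
 * The "contig" variant below backs ION_HEAP_TYPE_SYSTEM_CONTIG with
 * plain kzalloc() memory: physically contiguous, so it suits small
 * buffers for devices that cannot do scatter-gather DMA.
 */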
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

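/*
 * A kzalloc'd buffer is a single physically contiguous run, so mapping
 * it for DMA is just a one-entry sg_table, built here and torn down in
 * unmap_dma.
 */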
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}