/*
 * drivers/gpu/ion/ion_chunk_heap.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
//#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"

#include <asm/mach/map.h>

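/*
 * A chunk heap hands out physically contiguous chunks of a fixed,
 * platform-configured size, carved out of a single reserved memory
 * region and tracked with a genalloc pool.
 */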
struct ion_chunk_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long chunk_size;
	unsigned long size;
	unsigned long allocated;
};

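/*
 * Allocate a buffer by rounding the request up to a whole number of
 * chunks, pulling each chunk from the genalloc pool and describing it
 * with one scatterlist entry in the buffer's sg_table.
 */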
static int ion_chunk_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret, i;
	unsigned long num_chunks;

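	/* demand-faulted user mappings are not supported by this heap */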
	if (ion_buffer_fault_user_mappings(buffer))
		return -ENOMEM;

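	/* round the request up to a whole number of chunks */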
	num_chunks = ALIGN(size, chunk_heap->chunk_size) /
		chunk_heap->chunk_size;
	buffer->size = num_chunks * chunk_heap->chunk_size;

	if (buffer->size > chunk_heap->size - chunk_heap->allocated)
		return -ENOMEM;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ret;
	}

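	/* pull one chunk from the pool for each scatterlist entry */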
	sg = table->sgl;
	for (i = 0; i < num_chunks; i++) {
		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
						     chunk_heap->chunk_size);
		if (!paddr)
			goto err;
		sg_set_page(sg, phys_to_page(paddr), chunk_heap->chunk_size, 0);
		sg = sg_next(sg);
	}

	buffer->priv_virt = table;
	chunk_heap->allocated += buffer->size;
	return 0;
err:
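	/* unwind: return the chunks allocated so far to the pool */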
	sg = table->sgl;
	for (i -= 1; i >= 0; i--) {
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg_dma_len(sg));
		sg = sg_next(sg);
	}
	sg_free_table(table);
	kfree(table);
	return -ENOMEM;
}

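/*
 * Free a buffer: zero its memory, clean the CPU cache if the buffer was
 * cached, and return its chunks to the pool.
 */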
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_chunk_heap *chunk_heap =
		container_of(heap, struct ion_chunk_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	ion_heap_buffer_zero(buffer);

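	/*
	 * Clean cached buffers out of the CPU cache before their chunks
	 * are returned to the pool for reuse.
	 */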
	for_each_sg(table->sgl, sg, table->nents, i) {
		if (ion_buffer_cached(buffer))
			dma_sync_sg_for_device(NULL, sg, 1, DMA_BIDIRECTIONAL);
		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
			      sg_dma_len(sg));
	}
	chunk_heap->allocated -= buffer->size;
	sg_free_table(table);
	kfree(table);
}

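/*
 * The sg_table built at allocation time doubles as the dma mapping, so
 * map_dma/unmap_dma have nothing extra to do.
 */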
struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
			      struct ion_buffer *buffer)
{
	return;
}

static struct ion_heap_ops chunk_heap_ops = {
	.allocate = ion_chunk_heap_allocate,
	.free = ion_chunk_heap_free,
	.map_dma = ion_chunk_heap_map_dma,
	.unmap_dma = ion_chunk_heap_unmap_dma,
	.map_user = ion_heap_map_user,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
};

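/*
 * Create a chunk heap over the reserved region described by heap_data.
 * The genalloc pool's minimum allocation order matches the chunk size,
 * so every gen_pool_alloc() returns exactly one chunk.
 */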
struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_chunk_heap *chunk_heap;
	struct scatterlist sg;

	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
	if (!chunk_heap)
		return ERR_PTR(-ENOMEM);

	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
					   PAGE_SHIFT, -1);
	if (!chunk_heap->pool) {
		kfree(chunk_heap);
		return ERR_PTR(-ENOMEM);
	}
	chunk_heap->base = heap_data->base;
	chunk_heap->size = heap_data->size;
	chunk_heap->allocated = 0;

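	/*
	 * Clean the whole region out of the CPU cache before it is added
	 * to the pool, using a one-entry scatterlist that covers it.
	 */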
	sg_init_table(&sg, 1);
	sg_set_page(&sg, phys_to_page(heap_data->base), heap_data->size, 0);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
	chunk_heap->heap.ops = &chunk_heap_ops;
	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	pr_info("%s: base %pa size %zd align %pa\n", __func__,
		&chunk_heap->base, heap_data->size, &heap_data->align);

	return &chunk_heap->heap;
}

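/* Tear down a heap created by ion_chunk_heap_create(). */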
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
	struct ion_chunk_heap *chunk_heap =
	     container_of(heap, struct ion_chunk_heap, heap);

	gen_pool_destroy(chunk_heap->pool);
	kfree(chunk_heap);
	chunk_heap = NULL;
}