blob: 84d8d37317d1d538a36937155663c156a86d6bc7 [file] [log] [blame]
Laura Abbottf8a269c2013-04-01 16:26:00 -07001/*
2 * drivers/gpu/ion/ion_removed_heap.c
3 *
4 * Copyright (C) 2011 Google, Inc.
5 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17#include <linux/spinlock.h>
18
19#include <linux/err.h>
20#include <linux/genalloc.h>
21#include <linux/io.h>
22#include <linux/ion.h>
23#include <linux/mm.h>
24#include <linux/scatterlist.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/seq_file.h>
28#include "ion_priv.h"
29
30#include <asm/mach/map.h>
31#include <asm/cacheflush.h>
32#include <linux/msm_ion.h>
33
/*
 * ion_removed_heap - private state for a "removed" (carveout-style) heap.
 *
 * Wraps the generic struct ion_heap with a gen_pool allocator that hands
 * out physical ranges from a fixed carveout region.
 */
struct ion_removed_heap {
	struct ion_heap heap;		/* embedded generic heap; container_of() target */
	struct gen_pool *pool;		/* allocator over [base, base + total_size) */
	ion_phys_addr_t base;		/* physical start of the carveout */
	unsigned long allocated_bytes;	/* running total of outstanding allocations */
	unsigned long total_size;	/* size of the carveout in bytes */
	/* Optional platform hooks invoked around first map / last unmap. */
	int (*request_region)(void *);
	int (*release_region)(void *);
	atomic_t map_count;		/* outstanding kernel/user mappings */
	void *bus_id;			/* opaque cookie passed to the hooks above */
};
45
46ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
47 unsigned long size,
48 unsigned long align)
49{
50 struct ion_removed_heap *removed_heap =
51 container_of(heap, struct ion_removed_heap, heap);
52 unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool,
53 size, ilog2(align));
54
55 if (!offset) {
56 if ((removed_heap->total_size -
57 removed_heap->allocated_bytes) >= size)
58 pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.",
59 __func__, heap->name,
60 removed_heap->total_size -
61 removed_heap->allocated_bytes, size);
62 return ION_CARVEOUT_ALLOCATE_FAIL;
63 }
64
65 removed_heap->allocated_bytes += size;
66 return offset;
67}
68
69void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr,
70 unsigned long size)
71{
72 struct ion_removed_heap *removed_heap =
73 container_of(heap, struct ion_removed_heap, heap);
74
75 if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
76 return;
77 gen_pool_free(removed_heap->pool, addr, size);
78 removed_heap->allocated_bytes -= size;
79}
80
81static int ion_removed_heap_phys(struct ion_heap *heap,
82 struct ion_buffer *buffer,
83 ion_phys_addr_t *addr, size_t *len)
84{
85 *addr = buffer->priv_phys;
86 *len = buffer->size;
87 return 0;
88}
89
90static int ion_removed_heap_allocate(struct ion_heap *heap,
91 struct ion_buffer *buffer,
92 unsigned long size, unsigned long align,
93 unsigned long flags)
94{
95 buffer->priv_phys = ion_removed_allocate(heap, size, align);
96 return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
97}
98
99static void ion_removed_heap_free(struct ion_buffer *buffer)
100{
101 struct ion_heap *heap = buffer->heap;
102
103 ion_removed_free(heap, buffer->priv_phys, buffer->size);
104 buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
105}
106
107struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap,
108 struct ion_buffer *buffer)
109{
110 struct sg_table *table;
111 int ret;
112
113 table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
114 if (!table)
115 return ERR_PTR(-ENOMEM);
116
117 ret = sg_alloc_table(table, 1, GFP_KERNEL);
118 if (ret)
119 goto err0;
120
121 table->sgl->length = buffer->size;
122 table->sgl->offset = 0;
123 table->sgl->dma_address = buffer->priv_phys;
124
125 return table;
126
127err0:
128 kfree(table);
129 return ERR_PTR(ret);
130}
131
132void ion_removed_heap_unmap_dma(struct ion_heap *heap,
133 struct ion_buffer *buffer)
134{
135 if (buffer->sg_table)
136 sg_free_table(buffer->sg_table);
137 kfree(buffer->sg_table);
138 buffer->sg_table = 0;
139}
140
141static int ion_removed_request_region(struct ion_removed_heap *removed_heap)
142{
143 int ret_value = 0;
144 if (atomic_inc_return(&removed_heap->map_count) == 1) {
145 if (removed_heap->request_region) {
146 ret_value = removed_heap->request_region(
147 removed_heap->bus_id);
148 if (ret_value) {
149 pr_err("Unable to request SMI region");
150 atomic_dec(&removed_heap->map_count);
151 }
152 }
153 }
154 return ret_value;
155}
156
157static int ion_removed_release_region(struct ion_removed_heap *removed_heap)
158{
159 int ret_value = 0;
160 if (atomic_dec_and_test(&removed_heap->map_count)) {
161 if (removed_heap->release_region) {
162 ret_value = removed_heap->release_region(
163 removed_heap->bus_id);
164 if (ret_value)
165 pr_err("Unable to release SMI region");
166 }
167 }
168 return ret_value;
169}
170
171void *ion_removed_heap_map_kernel(struct ion_heap *heap,
172 struct ion_buffer *buffer)
173{
174 struct ion_removed_heap *removed_heap =
175 container_of(heap, struct ion_removed_heap, heap);
176 void *ret_value;
177
178 if (ion_removed_request_region(removed_heap))
179 return NULL;
180
181 if (ION_IS_CACHED(buffer->flags))
182 ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
183 else
184 ret_value = ioremap(buffer->priv_phys, buffer->size);
185
186 if (!ret_value)
187 ion_removed_release_region(removed_heap);
188 return ret_value;
189}
190
191void ion_removed_heap_unmap_kernel(struct ion_heap *heap,
192 struct ion_buffer *buffer)
193{
194 struct ion_removed_heap *removed_heap =
195 container_of(heap, struct ion_removed_heap, heap);
196
197 __arm_iounmap(buffer->vaddr);
198 buffer->vaddr = NULL;
199
200 ion_removed_release_region(removed_heap);
201 return;
202}
203
/*
 * ion_removed_heap_map_user - mmap a buffer into a user address space.
 *
 * Takes a region reference, then remaps the buffer's physical pages into
 * @vma.  Uncached buffers get writecombine protection.  On remap failure
 * the region reference is dropped.  Returns 0 or a negative errno.
 *
 * NOTE(review): the mapped length is vm_end - vm_start and the start pfn
 * is offset by vm_pgoff, with no visible check that the range stays
 * within buffer->size — presumably the ION core validates this before
 * calling the heap op; confirm against the caller.
 */
int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			      struct vm_area_struct *vma)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);
	int ret_value = 0;

	/* First mapping triggers the platform request_region hook. */
	if (ion_removed_request_region(removed_heap))
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

	/* Keep the map_count balanced if the mapping was not created. */
	if (ret_value)
		ion_removed_release_region(removed_heap);
	return ret_value;
}
226
227void ion_removed_heap_unmap_user(struct ion_heap *heap,
228 struct ion_buffer *buffer)
229{
230 struct ion_removed_heap *removed_heap =
231 container_of(heap, struct ion_removed_heap, heap);
232 ion_removed_release_region(removed_heap);
233}
234
Laura Abbottf8a269c2013-04-01 16:26:00 -0700235static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
236 const struct rb_root *mem_map)
237{
238 struct ion_removed_heap *removed_heap =
239 container_of(heap, struct ion_removed_heap, heap);
240
241 seq_printf(s, "total bytes currently allocated: %lx\n",
242 removed_heap->allocated_bytes);
243 seq_printf(s, "total heap size: %lx\n", removed_heap->total_size);
244
245 if (mem_map) {
246 unsigned long base = removed_heap->base;
247 unsigned long size = removed_heap->total_size;
248 unsigned long end = base+size;
249 unsigned long last_end = base;
250 struct rb_node *n;
251
252 seq_printf(s, "\nMemory Map\n");
253 seq_printf(s, "%16.s %14.s %14.s %14.s\n",
254 "client", "start address", "end address",
255 "size (hex)");
256
257 for (n = rb_first(mem_map); n; n = rb_next(n)) {
258 struct mem_map_data *data =
259 rb_entry(n, struct mem_map_data, node);
260 const char *client_name = "(null)";
261
262 if (last_end < data->addr) {
263 phys_addr_t da;
264
265 da = data->addr-1;
266 seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
267 "FREE", &last_end, &da,
268 data->addr-last_end,
269 data->addr-last_end);
270 }
271
272 if (data->client_name)
273 client_name = data->client_name;
274
275 seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
276 client_name, &data->addr,
277 &data->addr_end,
278 data->size, data->size);
279 last_end = data->addr_end+1;
280 }
281 if (last_end < end) {
282 seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
283 last_end, end-1, end-last_end, end-last_end);
284 }
285 }
286 return 0;
287}
288
/* Heap-operations vtable registered for ION_HEAP_TYPE_REMOVED heaps. */
static struct ion_heap_ops removed_heap_ops = {
	.allocate = ion_removed_heap_allocate,
	.free = ion_removed_heap_free,
	.phys = ion_removed_heap_phys,
	.map_user = ion_removed_heap_map_user,
	.map_kernel = ion_removed_heap_map_kernel,
	.unmap_user = ion_removed_heap_unmap_user,
	.unmap_kernel = ion_removed_heap_unmap_kernel,
	.map_dma = ion_removed_heap_map_dma,
	.unmap_dma = ion_removed_heap_unmap_dma,
	.print_debug = ion_removed_print_debug,
};
301
302struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data)
303{
304 struct ion_removed_heap *removed_heap;
305 int ret;
306
307 removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL);
308 if (!removed_heap)
309 return ERR_PTR(-ENOMEM);
310
311 removed_heap->pool = gen_pool_create(12, -1);
312 if (!removed_heap->pool) {
313 kfree(removed_heap);
314 return ERR_PTR(-ENOMEM);
315 }
316 removed_heap->base = heap_data->base;
317 ret = gen_pool_add(removed_heap->pool, removed_heap->base,
318 heap_data->size, -1);
319 if (ret < 0) {
320 gen_pool_destroy(removed_heap->pool);
321 kfree(removed_heap);
322 return ERR_PTR(-EINVAL);
323 }
324 removed_heap->heap.ops = &removed_heap_ops;
325 removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
326 removed_heap->allocated_bytes = 0;
327 removed_heap->total_size = heap_data->size;
Laura Abbottf8a269c2013-04-01 16:26:00 -0700328
329 if (heap_data->extra_data) {
330 struct ion_co_heap_pdata *extra_data =
331 heap_data->extra_data;
332
333 if (extra_data->setup_region)
334 removed_heap->bus_id = extra_data->setup_region();
335 if (extra_data->request_region)
336 removed_heap->request_region =
337 extra_data->request_region;
338 if (extra_data->release_region)
339 removed_heap->release_region =
340 extra_data->release_region;
341 }
342 return &removed_heap->heap;
343}
344
345void ion_removed_heap_destroy(struct ion_heap *heap)
346{
347 struct ion_removed_heap *removed_heap =
348 container_of(heap, struct ion_removed_heap, heap);
349
350 gen_pool_destroy(removed_heap->pool);
351 kfree(removed_heap);
352 removed_heap = NULL;
353}