/*
 * drivers/gpu/ion/ion_removed_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
struct ion_removed_heap {
	struct ion_heap heap;
	struct gen_pool *pool;		/* allocator over the carved-out range */
	ion_phys_addr_t base;		/* physical base of the heap */
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*request_region)(void *);	/* optional bus hooks from platform data */
	int (*release_region)(void *);
	atomic_t map_count;		/* outstanding kernel/user/DMA mappings */
	void *bus_id;
	unsigned int has_outer_cache;
};

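/*
 * Carve size bytes out of the heap's gen_pool. Returns the physical
 * address of the allocation, or ION_CARVEOUT_ALLOCATE_FAIL when the
 * pool cannot satisfy the request (exhausted or fragmented).
 */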
ion_phys_addr_t ion_removed_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);
	unsigned long offset = gen_pool_alloc_aligned(removed_heap->pool,
							size, ilog2(align));

	if (!offset) {
		if ((removed_heap->total_size -
		     removed_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				__func__, heap->name,
				removed_heap->total_size -
				removed_heap->allocated_bytes, size);
		return ION_CARVEOUT_ALLOCATE_FAIL;
	}

	removed_heap->allocated_bytes += size;
	return offset;
}

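/* Return an allocation to the gen_pool; failed allocations are a no-op. */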
void ion_removed_free(struct ion_heap *heap, ion_phys_addr_t addr,
		      unsigned long size)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(removed_heap->pool, addr, size);
	removed_heap->allocated_bytes -= size;
}

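/* Report the physical address and size stashed in the buffer at alloc time. */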
static int ion_removed_heap_phys(struct ion_heap *heap,
				 struct ion_buffer *buffer,
				 ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

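/* ion_heap_ops.allocate: record the physical address in buffer->priv_phys. */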
static int ion_removed_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	buffer->priv_phys = ion_removed_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

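/* ion_heap_ops.free: release the carveout range and poison priv_phys. */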
static void ion_removed_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_removed_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

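/*
 * Build a single-entry sg_table for the buffer. The removed region is
 * not backed by struct pages here, so the physical address is written
 * straight into dma_address rather than going through sg_set_page().
 */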
struct sg_table *ion_removed_heap_map_dma(struct ion_heap *heap,
					  struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err0;

	table->sgl->length = buffer->size;
	table->sgl->offset = 0;
	table->sgl->dma_address = buffer->priv_phys;

	return table;

err0:
	kfree(table);
	return ERR_PTR(ret);
}

void ion_removed_heap_unmap_dma(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

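/*
 * Take a reference on the backing region. The platform request_region
 * hook (if any) runs only on the 0 -> 1 transition of map_count; on
 * failure the count is dropped again so a later mapping can retry.
 */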
static int ion_removed_request_region(struct ion_removed_heap *removed_heap)
{
	int ret_value = 0;

	if (atomic_inc_return(&removed_heap->map_count) == 1) {
		if (removed_heap->request_region) {
			ret_value = removed_heap->request_region(
						removed_heap->bus_id);
			if (ret_value) {
				pr_err("Unable to request SMI region\n");
				atomic_dec(&removed_heap->map_count);
			}
		}
	}
	return ret_value;
}

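/*
 * Drop a reference on the backing region; the platform release_region
 * hook runs when the last mapping goes away (map_count hits zero).
 */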
static int ion_removed_release_region(struct ion_removed_heap *removed_heap)
{
	int ret_value = 0;

	if (atomic_dec_and_test(&removed_heap->map_count)) {
		if (removed_heap->release_region) {
			ret_value = removed_heap->release_region(
						removed_heap->bus_id);
			if (ret_value)
				pr_err("Unable to release SMI region\n");
		}
	}
	return ret_value;
}

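/*
 * Map the buffer into the kernel with ioremap(); the mapping is cached
 * or uncached according to the buffer's ION_IS_CACHED flag. The region
 * reference is dropped again if the remap fails.
 */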
void *ion_removed_heap_map_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);
	void *ret_value;

	if (ion_removed_request_region(removed_heap))
		return NULL;

	if (ION_IS_CACHED(buffer->flags))
		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
	else
		ret_value = ioremap(buffer->priv_phys, buffer->size);

	if (!ret_value)
		ion_removed_release_region(removed_heap);
	return ret_value;
}

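/* Tear down the kernel mapping and drop the region reference. */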
void ion_removed_heap_unmap_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);

	__arm_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;

	ion_removed_release_region(removed_heap);
}

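/*
 * Map the buffer into user space with remap_pfn_range(). Uncached
 * buffers are mapped write-combined; vm_pgoff selects a page offset
 * into the buffer.
 */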
int ion_removed_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			      struct vm_area_struct *vma)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);
	int ret_value = 0;

	if (ion_removed_request_region(removed_heap))
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

	if (ret_value)
		ion_removed_release_region(removed_heap);
	return ret_value;
}

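/* Drop the region reference taken when the buffer was mapped to user space. */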
void ion_removed_heap_unmap_user(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);

	ion_removed_release_region(removed_heap);
}

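/*
 * Clean and/or invalidate the caches for a buffer. When no kernel
 * mapping exists (vaddr == NULL), the buffer is ioremap()ed in chunks
 * no larger than an eighth of the vmalloc area, halving the chunk size
 * up to ten times if a remap fails, so large buffers can be maintained
 * without one huge mapping.
 */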
int ion_removed_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
		total_size = buffer->size;

		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						/* don't leak the mapping */
						iounmap(ptr);
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (removed_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

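/*
 * Debugfs helper: dump allocation totals and, when a memory map is
 * supplied, a table of allocated and free ranges within the heap.
 */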
static int ion_removed_print_debug(struct ion_heap *heap, struct seq_file *s,
				   const struct rb_root *mem_map)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);

	seq_printf(s, "total bytes currently allocated: %lx\n",
		removed_heap->allocated_bytes);
	seq_printf(s, "total heap size: %lx\n", removed_heap->total_size);

	if (mem_map) {
		unsigned long base = removed_heap->base;
		unsigned long size = removed_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			/* Report any unallocated gap before this entry. */
			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				   last_end, end - 1, end - last_end,
				   end - last_end);
		}
	}
	return 0;
}

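/* Heap operations wired into the ION core for ION_HEAP_TYPE_REMOVED. */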
static struct ion_heap_ops removed_heap_ops = {
	.allocate = ion_removed_heap_allocate,
	.free = ion_removed_heap_free,
	.phys = ion_removed_heap_phys,
	.map_user = ion_removed_heap_map_user,
	.map_kernel = ion_removed_heap_map_kernel,
	.unmap_user = ion_removed_heap_unmap_user,
	.unmap_kernel = ion_removed_heap_unmap_kernel,
	.map_dma = ion_removed_heap_map_dma,
	.unmap_dma = ion_removed_heap_unmap_dma,
	.cache_op = ion_removed_cache_ops,
	.print_debug = ion_removed_print_debug,
};

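/*
 * Create a removed heap over the platform-supplied carveout. The
 * gen_pool is created with a minimum allocation order of 12 (4 KB),
 * so every allocation is at least page sized and page aligned.
 */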
struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_removed_heap *removed_heap;
	int ret;

	removed_heap = kzalloc(sizeof(struct ion_removed_heap), GFP_KERNEL);
	if (!removed_heap)
		return ERR_PTR(-ENOMEM);

	removed_heap->pool = gen_pool_create(12, -1);
	if (!removed_heap->pool) {
		kfree(removed_heap);
		return ERR_PTR(-ENOMEM);
	}
	removed_heap->base = heap_data->base;
	ret = gen_pool_add(removed_heap->pool, removed_heap->base,
			heap_data->size, -1);
	if (ret < 0) {
		gen_pool_destroy(removed_heap->pool);
		kfree(removed_heap);
		return ERR_PTR(-EINVAL);
	}
	removed_heap->heap.ops = &removed_heap_ops;
	removed_heap->heap.type = ION_HEAP_TYPE_REMOVED;
	removed_heap->allocated_bytes = 0;
	removed_heap->total_size = heap_data->size;
	removed_heap->has_outer_cache = heap_data->has_outer_cache;

	if (heap_data->extra_data) {
		struct ion_co_heap_pdata *extra_data =
				heap_data->extra_data;

		if (extra_data->setup_region)
			removed_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			removed_heap->request_region =
					extra_data->request_region;
		if (extra_data->release_region)
			removed_heap->release_region =
					extra_data->release_region;
	}
	return &removed_heap->heap;
}

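/* Tear down the gen_pool and free the heap created above. */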
void ion_removed_heap_destroy(struct ion_heap *heap)
{
	struct ion_removed_heap *removed_heap =
		container_of(heap, struct ion_removed_heap, heap);

	gen_pool_destroy(removed_heap->pool);
	kfree(removed_heap);
}