/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

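/*
 * A carveout heap hands out ranges of a fixed, physically contiguous
 * region. @pool tracks free space within [@base, @base + @total_size);
 * @allocated_bytes is kept purely for accounting and debugfs reporting.
 */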
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long allocated_bytes;
	unsigned long total_size;
	unsigned int has_outer_cache;
};

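/*
 * Carve @size bytes out of the heap's gen_pool. Returns the physical
 * address of the allocation, or ION_CARVEOUT_ALLOCATE_FAIL when the
 * pool cannot satisfy the request (which, when enough total free space
 * remains, most likely means the pool is fragmented).
 */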
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
						      size, ilog2(align));

	if (!offset) {
		if ((carveout_heap->total_size -
		     carveout_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				 __func__, heap->name,
				 carveout_heap->total_size -
				 carveout_heap->allocated_bytes, size);
		return ION_CARVEOUT_ALLOCATE_FAIL;
	}

	carveout_heap->allocated_bytes += size;
	return offset;
}

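/*
 * Return a range previously handed out by ion_carveout_allocate() to
 * the gen_pool. ION_CARVEOUT_ALLOCATE_FAIL is tolerated so error paths
 * can call this unconditionally.
 */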
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
	carveout_heap->allocated_bytes -= size;
}

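/* Report the physical address and size backing @buffer. */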
static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

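/* ion_heap_ops hook: stash the carved-out physical address in priv_phys. */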
static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

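/* ion_heap_ops hook: release the backing range and poison priv_phys. */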
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

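/*
 * Build an sg_table for the buffer. Cached buffers are described in
 * PAGE_SIZE chunks (presumably so later cache maintenance can walk the
 * buffer page by page); uncached buffers get a single large entry.
 */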
struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
					   struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;

	return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
					   buffer->size);
}

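/* Free the sg_table created by ion_carveout_heap_map_dma(). */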
void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

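/*
 * Map the buffer into the kernel's address space with ioremap(),
 * honouring the buffer's cacheability flag.
 */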
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	void *ret_value;

	if (ION_IS_CACHED(buffer->flags))
		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
	else
		ret_value = ioremap(buffer->priv_phys, buffer->size);

	return ret_value;
}

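/* Tear down the kernel mapping established by map_kernel above. */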
void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	__arm_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

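/*
 * Map the buffer into a user process. Uncached buffers are mapped
 * write-combined; remap_pfn_range() does the actual work, offset by
 * vma->vm_pgoff into the buffer.
 */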
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			       struct vm_area_struct *vma)
{
	int ret_value = 0;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

	return ret_value;
}

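/*
 * Perform clean/invalidate/flush cache maintenance on the buffer. When
 * no kernel mapping exists (@vaddr == NULL) the buffer is temporarily
 * ioremap()ed in pieces no larger than an eighth of the vmalloc space,
 * halving the piece size on mapping failure before giving up.
 * Outer-cache maintenance is applied by physical address afterwards
 * when the heap requires it.
 */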
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
		total_size = buffer->size;

		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						/*
						 * Don't leak the mapping on
						 * an unknown command.
						 */
						iounmap(ptr);
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (carveout_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

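/*
 * debugfs helper: print allocation totals and, when a memory map is
 * supplied, a table of client allocations with the free gaps between
 * them.
 */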
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
				    const struct rb_root *mem_map)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	seq_printf(s, "total bytes currently allocated: %lx\n",
		carveout_heap->allocated_bytes);
	seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

	if (mem_map) {
		unsigned long base = carveout_heap->base;
		unsigned long size = carveout_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}
	return 0;
}

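/*
 * Map the physically contiguous buffer into an IOMMU domain at a newly
 * allocated IOVA, described by a single-entry scatterlist. Any
 * @iova_length beyond the buffer size is backed via
 * msm_iommu_map_extra() in SZ_4K steps. Falls back to the raw physical
 * address when no IOMMU is in use.
 */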
int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		/* Don't report success when the scatterlist allocation fails. */
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sglist->length = buffer->size;
	sglist->offset = 0;
	sglist->dma_address = buffer->priv_phys;

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

out:
	return ret;
}

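/*
 * Undo ion_carveout_heap_map_iommu(): unmap the IOVA range and return
 * it to the partition's IOVA allocator. A no-op when no IOMMU is in
 * use, mirroring the mapping path.
 */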
void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

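/* Heap operations exposed to the ION core for carveout heaps. */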
static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_user = ion_carveout_heap_map_user,
	.map_kernel = ion_carveout_heap_map_kernel,
	.unmap_kernel = ion_carveout_heap_unmap_kernel,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.cache_op = ion_carveout_cache_ops,
	.print_debug = ion_carveout_print_debug,
	.map_iommu = ion_carveout_heap_map_iommu,
	.unmap_iommu = ion_carveout_heap_unmap_iommu,
};

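/*
 * Instantiate a carveout heap over the physical range described by
 * @heap_data. A gen_pool with order-12 (4 KiB) minimum allocation
 * granularity tracks the region.
 */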
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
			heap_data->size, -1);
	if (ret < 0) {
		gen_pool_destroy(carveout_heap->pool);
		kfree(carveout_heap);
		return ERR_PTR(-EINVAL);
	}
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->allocated_bytes = 0;
	carveout_heap->total_size = heap_data->size;
	carveout_heap->has_outer_cache = heap_data->has_outer_cache;

	return &carveout_heap->heap;
}

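/*
 * Release the gen_pool and the heap itself. Callers are expected to
 * have freed all outstanding allocations first.
 */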
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
	     container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
}