/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

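/*
 * Per-heap state for a physically contiguous carveout region.
 * Allocations are handed out from @pool; @map_count counts live
 * kernel/user mappings so the optional @request_region/@release_region
 * hooks fire only on the first map and the last unmap.
 */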
struct ion_carveout_heap {
        struct ion_heap heap;
        struct gen_pool *pool;
        ion_phys_addr_t base;
        unsigned long allocated_bytes;
        unsigned long total_size;
        int (*request_region)(void *);
        int (*release_region)(void *);
        atomic_t map_count;
        void *bus_id;
        unsigned int has_outer_cache;
};

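/*
 * Carve @size bytes out of the heap's gen_pool, aligned to @align.
 * Returns the physical address of the allocation, or
 * ION_CARVEOUT_ALLOCATE_FAIL if the pool cannot satisfy the request.
 */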
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
                                      unsigned long size,
                                      unsigned long align)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
                                                        size, ilog2(align));

        if (!offset) {
                if ((carveout_heap->total_size -
                      carveout_heap->allocated_bytes) >= size)
                        pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
                                 __func__, heap->name,
                                 carveout_heap->total_size -
                                 carveout_heap->allocated_bytes, size);
                return ION_CARVEOUT_ALLOCATE_FAIL;
        }

        carveout_heap->allocated_bytes += size;
        return offset;
}

void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
                       unsigned long size)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
                return;
        gen_pool_free(carveout_heap->pool, addr, size);
        carveout_heap->allocated_bytes -= size;
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
                                  struct ion_buffer *buffer,
                                  ion_phys_addr_t *addr, size_t *len)
{
        *addr = buffer->priv_phys;
        *len = buffer->size;
        return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        buffer->priv_phys = ion_carveout_allocate(heap, size, align);
        return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;

        ion_carveout_free(heap, buffer->priv_phys, buffer->size);
        buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

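/*
 * Describe the contiguous buffer with a single-entry sg_table. The
 * carveout memory has no backing struct pages, so only the length and
 * dma_address fields of the entry are filled in.
 */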
struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
                                           struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret)
                goto err0;

        table->sgl->length = buffer->size;
        table->sgl->offset = 0;
        table->sgl->dma_address = buffer->priv_phys;

        return table;

err0:
        kfree(table);
        return ERR_PTR(ret);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
}

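/*
 * The first mapper of any buffer in the heap requests the backing bus
 * region (e.g. SMI) through the platform hook; the count is balanced
 * by ion_carveout_release_region() when the last mapping goes away.
 */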
static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
{
        int ret_value = 0;

        if (atomic_inc_return(&carveout_heap->map_count) == 1) {
                if (carveout_heap->request_region) {
                        ret_value = carveout_heap->request_region(
                                                carveout_heap->bus_id);
                        if (ret_value) {
                                pr_err("Unable to request SMI region\n");
                                atomic_dec(&carveout_heap->map_count);
                        }
                }
        }
        return ret_value;
}

static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
{
        int ret_value = 0;

        if (atomic_dec_and_test(&carveout_heap->map_count)) {
                if (carveout_heap->release_region) {
                        ret_value = carveout_heap->release_region(
                                                carveout_heap->bus_id);
                        if (ret_value)
                                pr_err("Unable to release SMI region\n");
                }
        }
        return ret_value;
}

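/*
 * Kernel mappings go through ioremap()/ioremap_cached() depending on
 * the buffer's cache flags, holding a bus-region reference while the
 * mapping exists.
 */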
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
                                   struct ion_buffer *buffer)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        void *ret_value;

        if (ion_carveout_request_region(carveout_heap))
                return NULL;

        if (ION_IS_CACHED(buffer->flags))
                ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
        else
                ret_value = ioremap(buffer->priv_phys, buffer->size);

        if (!ret_value)
                ion_carveout_release_region(carveout_heap);
        return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
                                    struct ion_buffer *buffer)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        __arm_iounmap(buffer->vaddr);
        buffer->vaddr = NULL;

        ion_carveout_release_region(carveout_heap);
}

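/*
 * Userspace mappings are plain remap_pfn_range() of the physical
 * range; uncached buffers are mapped write-combined.
 */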
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                               struct vm_area_struct *vma)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        int ret_value = 0;

        if (ion_carveout_request_region(carveout_heap))
                return -EINVAL;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        ret_value = remap_pfn_range(vma, vma->vm_start,
                        __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
                        vma->vm_end - vma->vm_start,
                        vma->vm_page_prot);

        if (ret_value)
                ion_carveout_release_region(carveout_heap);
        return ret_value;
}

void ion_carveout_heap_unmap_user(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        ion_carveout_release_region(carveout_heap);
}

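/*
 * Clean and/or invalidate a buffer for the ION_IOC_*_CACHES commands.
 * If no kernel mapping exists, the physical range is ioremap()ed in
 * chunks (starting at 1/8th of the vmalloc space and halving on
 * failure) so the dmac_* operations have a virtual address to work
 * on; the outer cache, if any, is maintained by physical address.
 */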
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);
        unsigned int size_to_vmap, total_size;
        int i, j;
        void *ptr = NULL;
        ion_phys_addr_t buff_phys = buffer->priv_phys;

        if (!vaddr) {
                /*
                 * Split the vmalloc space into smaller regions in
                 * order to clean and/or invalidate the cache.
                 */
                size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
                total_size = buffer->size;

                for (i = 0; i < total_size; i += size_to_vmap) {
                        size_to_vmap = min(size_to_vmap, total_size - i);
                        for (j = 0; j < 10 && size_to_vmap; ++j) {
                                ptr = ioremap(buff_phys, size_to_vmap);
                                if (ptr) {
                                        switch (cmd) {
                                        case ION_IOC_CLEAN_CACHES:
                                                dmac_clean_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_clean_range;
                                                break;
                                        case ION_IOC_INV_CACHES:
                                                dmac_inv_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_inv_range;
                                                break;
                                        case ION_IOC_CLEAN_INV_CACHES:
                                                dmac_flush_range(ptr,
                                                        ptr + size_to_vmap);
                                                outer_cache_op =
                                                        outer_flush_range;
                                                break;
                                        default:
                                                return -EINVAL;
                                        }
                                        buff_phys += size_to_vmap;
                                        break;
                                } else {
                                        size_to_vmap >>= 1;
                                }
                        }
                        if (!ptr) {
                                pr_err("Couldn't io-remap the memory\n");
                                return -EINVAL;
                        }
                        iounmap(ptr);
                }
        } else {
                switch (cmd) {
                case ION_IOC_CLEAN_CACHES:
                        dmac_clean_range(vaddr, vaddr + length);
                        outer_cache_op = outer_clean_range;
                        break;
                case ION_IOC_INV_CACHES:
                        dmac_inv_range(vaddr, vaddr + length);
                        outer_cache_op = outer_inv_range;
                        break;
                case ION_IOC_CLEAN_INV_CACHES:
                        dmac_flush_range(vaddr, vaddr + length);
                        outer_cache_op = outer_flush_range;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (carveout_heap->has_outer_cache) {
                unsigned long pstart = buffer->priv_phys + offset;

                outer_cache_op(pstart, pstart + length);
        }
        return 0;
}

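/*
 * Debugfs output: total allocated/heap size plus, when a memory map
 * is provided, a per-client listing of used and FREE ranges.
 */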
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
                                    const struct rb_root *mem_map)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        seq_printf(s, "total bytes currently allocated: %lx\n",
                carveout_heap->allocated_bytes);
        seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

        if (mem_map) {
                unsigned long base = carveout_heap->base;
                unsigned long size = carveout_heap->total_size;
                unsigned long end = base + size;
                unsigned long last_end = base;
                struct rb_node *n;

                seq_printf(s, "\nMemory Map\n");
                seq_printf(s, "%16.s %14.s %14.s %14.s\n",
                           "client", "start address", "end address",
                           "size (hex)");

                for (n = rb_first(mem_map); n; n = rb_next(n)) {
                        struct mem_map_data *data =
                                        rb_entry(n, struct mem_map_data, node);
                        const char *client_name = "(null)";

                        if (last_end < data->addr) {
                                phys_addr_t da;

                                da = data->addr - 1;
                                seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
                                           "FREE", &last_end, &da,
                                           data->addr - last_end,
                                           data->addr - last_end);
                        }

                        if (data->client_name)
                                client_name = data->client_name;

                        seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
                                   client_name, &data->addr,
                                   &data->addr_end,
                                   data->size, data->size);
                        last_end = data->addr_end + 1;
                }
                if (last_end < end) {
                        seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
                                last_end, end - 1, end - last_end,
                                end - last_end);
                }
        }
        return 0;
}

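/*
 * The buffer is already physically contiguous, so it is mapped into
 * the IOMMU domain through a one-entry scatterlist. Any iova length
 * requested beyond the buffer size is backed via msm_iommu_map_extra()
 * at 4K granularity.
 */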
int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
                                struct ion_iommu_map *data,
                                unsigned int domain_num,
                                unsigned int partition_num,
                                unsigned long align,
                                unsigned long iova_length,
                                unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        struct scatterlist *sglist = NULL;
        int prot = IOMMU_WRITE | IOMMU_READ;

        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        data->mapped_size = iova_length;

        if (!msm_use_iommu()) {
                data->iova_addr = buffer->priv_phys;
                return 0;
        }

        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        sglist = vmalloc(sizeof(*sglist));
        if (!sglist) {
                ret = -ENOMEM;
                goto out1;
        }

        sg_init_table(sglist, 1);
        sglist->length = buffer->size;
        sglist->offset = 0;
        sglist->dma_address = buffer->priv_phys;

        ret = iommu_map_range(domain, data->iova_addr, sglist,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                unsigned long phys_addr = sg_phys(sglist);

                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        vfree(sglist);
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        vfree(sglist);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
out:
        return ret;
}

void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        if (!msm_use_iommu())
                return;

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
}

static struct ion_heap_ops carveout_heap_ops = {
        .allocate = ion_carveout_heap_allocate,
        .free = ion_carveout_heap_free,
        .phys = ion_carveout_heap_phys,
        .map_user = ion_carveout_heap_map_user,
        .map_kernel = ion_carveout_heap_map_kernel,
        .unmap_user = ion_carveout_heap_unmap_user,
        .unmap_kernel = ion_carveout_heap_unmap_kernel,
        .map_dma = ion_carveout_heap_map_dma,
        .unmap_dma = ion_carveout_heap_unmap_dma,
        .cache_op = ion_carveout_cache_ops,
        .print_debug = ion_carveout_print_debug,
        .map_iommu = ion_carveout_heap_map_iommu,
        .unmap_iommu = ion_carveout_heap_unmap_iommu,
};

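/*
 * Create a carveout heap over [base, base + size). gen_pool_create(12, -1)
 * gives a 4K allocation granule; optional bus-region hooks come from
 * heap_data->extra_data.
 */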
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_carveout_heap *carveout_heap;
        int ret;

        carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
        if (!carveout_heap)
                return ERR_PTR(-ENOMEM);

        carveout_heap->pool = gen_pool_create(12, -1);
        if (!carveout_heap->pool) {
                kfree(carveout_heap);
                return ERR_PTR(-ENOMEM);
        }
        carveout_heap->base = heap_data->base;
        ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
                        heap_data->size, -1);
        if (ret < 0) {
                gen_pool_destroy(carveout_heap->pool);
                kfree(carveout_heap);
                return ERR_PTR(-EINVAL);
        }
        carveout_heap->heap.ops = &carveout_heap_ops;
        carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
        carveout_heap->allocated_bytes = 0;
        carveout_heap->total_size = heap_data->size;
        carveout_heap->has_outer_cache = heap_data->has_outer_cache;

        if (heap_data->extra_data) {
                struct ion_co_heap_pdata *extra_data =
                                heap_data->extra_data;

                if (extra_data->setup_region)
                        carveout_heap->bus_id = extra_data->setup_region();
                if (extra_data->request_region)
                        carveout_heap->request_region =
                                extra_data->request_region;
                if (extra_data->release_region)
                        carveout_heap->release_region =
                                extra_data->release_region;
        }
        return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
        struct ion_carveout_heap *carveout_heap =
                container_of(heap, struct ion_carveout_heap, heap);

        gen_pool_destroy(carveout_heap->pool);
        kfree(carveout_heap);
}