/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

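/**
 * struct ion_carveout_heap - heap backed by a reserved, physically
 *			      contiguous memory region
 * @heap:		the generic ion heap embedded in this carveout heap
 * @pool:		gen_pool allocator managing the carveout range
 * @base:		physical base address of the carveout
 * @allocated_bytes:	bytes currently handed out to buffers
 * @total_size:		total size of the carveout, in bytes
 * @request_region:	optional hook called before the first mapping
 * @release_region:	optional hook called after the last unmapping
 * @map_count:		number of outstanding mappings of this heap
 * @bus_id:		opaque cookie passed to the region hooks
 * @has_outer_cache:	nonzero if outer cache maintenance is required
 */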
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*request_region)(void *);
	int (*release_region)(void *);
	atomic_t map_count;
	void *bus_id;
	unsigned int has_outer_cache;
};

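/*
 * Carve @size bytes (aligned to @align) out of the heap's gen_pool.
 * Returns the physical address of the allocation, or
 * ION_CARVEOUT_ALLOCATE_FAIL when the pool cannot satisfy the request,
 * e.g. because the carveout has become fragmented.
 */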
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
						      size, ilog2(align));

	if (!offset) {
		if ((carveout_heap->total_size -
		     carveout_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but the allocation of size %lx still failed. Memory is probably fragmented.\n",
				 __func__, heap->name,
				 carveout_heap->total_size -
				 carveout_heap->allocated_bytes, size);
		return ION_CARVEOUT_ALLOCATE_FAIL;
	}

	carveout_heap->allocated_bytes += size;
	return offset;
}

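/*
 * Return an allocation made with ion_carveout_allocate() to the pool.
 * Addresses from a failed allocation are silently ignored.
 */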
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
	carveout_heap->allocated_bytes -= size;
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

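/*
 * Build an sg_table covering the buffer.  Cached buffers are chunked
 * at PAGE_SIZE granularity; uncached buffers are described as a single
 * physically contiguous chunk.
 */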
struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
					   struct ion_buffer *buffer)
{
	size_t chunk_size = buffer->size;

	if (ION_IS_CACHED(buffer->flags))
		chunk_size = PAGE_SIZE;

	return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
					   buffer->size);
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

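/*
 * The region hooks are refcounted via map_count: request_region() runs
 * only when the first mapping of the heap is created, and
 * release_region() only when the last mapping goes away.
 */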
static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
{
	int ret_value = 0;

	if (atomic_inc_return(&carveout_heap->map_count) == 1) {
		if (carveout_heap->request_region) {
			ret_value = carveout_heap->request_region(
						carveout_heap->bus_id);
			if (ret_value) {
				pr_err("Unable to request SMI region\n");
				atomic_dec(&carveout_heap->map_count);
			}
		}
	}
	return ret_value;
}

static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
{
	int ret_value = 0;

	if (atomic_dec_and_test(&carveout_heap->map_count)) {
		if (carveout_heap->release_region) {
			ret_value = carveout_heap->release_region(
						carveout_heap->bus_id);
			if (ret_value)
				pr_err("Unable to release SMI region\n");
		}
	}
	return ret_value;
}

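/*
 * Kernel mappings go through ioremap()/ioremap_cached() on the
 * buffer's physical range; the region is released again if the
 * remap fails.
 */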
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	void *ret_value;

	if (ion_carveout_request_region(carveout_heap))
		return NULL;

	if (ION_IS_CACHED(buffer->flags))
		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
	else
		ret_value = ioremap(buffer->priv_phys, buffer->size);

	if (!ret_value)
		ion_carveout_release_region(carveout_heap);
	return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	__arm_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;

	ion_carveout_release_region(carveout_heap);
}

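/*
 * Userspace mappings use remap_pfn_range() on the buffer's physical
 * pages; uncached buffers are mapped write-combined.
 */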
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			       struct vm_area_struct *vma)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	int ret_value = 0;

	if (ion_carveout_request_region(carveout_heap))
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

	if (ret_value)
		ion_carveout_release_region(carveout_heap);
	return ret_value;
}

void ion_carveout_heap_unmap_user(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	ion_carveout_release_region(carveout_heap);
}

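/*
 * Clean and/or invalidate a buffer's cache lines.  When no kernel
 * mapping exists (@vaddr is NULL) the buffer is temporarily
 * ioremap()ed in pieces no larger than an eighth of the vmalloc
 * space, halving the piece size up to ten times if the remap fails.
 */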
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned int size_to_vmap, total_size;
	int i, j;
	void *ptr = NULL;
	ion_phys_addr_t buff_phys = buffer->priv_phys;

	if (!vaddr) {
		/*
		 * Split the vmalloc space into smaller regions in
		 * order to clean and/or invalidate the cache.
		 */
		size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
		total_size = buffer->size;

		for (i = 0; i < total_size; i += size_to_vmap) {
			size_to_vmap = min(size_to_vmap, total_size - i);
			for (j = 0; j < 10 && size_to_vmap; ++j) {
				ptr = ioremap(buff_phys, size_to_vmap);
				if (ptr) {
					switch (cmd) {
					case ION_IOC_CLEAN_CACHES:
						dmac_clean_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_clean_range;
						break;
					case ION_IOC_INV_CACHES:
						dmac_inv_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_inv_range;
						break;
					case ION_IOC_CLEAN_INV_CACHES:
						dmac_flush_range(ptr,
							ptr + size_to_vmap);
						outer_cache_op =
							outer_flush_range;
						break;
					default:
						return -EINVAL;
					}
					buff_phys += size_to_vmap;
					break;
				} else {
					size_to_vmap >>= 1;
				}
			}
			if (!ptr) {
				pr_err("Couldn't io-remap the memory\n");
				return -EINVAL;
			}
			iounmap(ptr);
		}
	} else {
		switch (cmd) {
		case ION_IOC_CLEAN_CACHES:
			dmac_clean_range(vaddr, vaddr + length);
			outer_cache_op = outer_clean_range;
			break;
		case ION_IOC_INV_CACHES:
			dmac_inv_range(vaddr, vaddr + length);
			outer_cache_op = outer_inv_range;
			break;
		case ION_IOC_CLEAN_INV_CACHES:
			dmac_flush_range(vaddr, vaddr + length);
			outer_cache_op = outer_flush_range;
			break;
		default:
			return -EINVAL;
		}
	}

	if (carveout_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

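/*
 * Dump allocation totals and, when @mem_map is supplied, a
 * client-by-client map of used and free ranges to the debugfs
 * seq_file.
 */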
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
				    const struct rb_root *mem_map)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	seq_printf(s, "total bytes currently allocated: %lx\n",
		carveout_heap->allocated_bytes);
	seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

	if (mem_map) {
		unsigned long base = carveout_heap->base;
		unsigned long size = carveout_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				phys_addr_t da;

				da = data->addr - 1;
				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
					   "FREE", &last_end, &da,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
				   client_name, &data->addr,
				   &data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}
	return 0;
}

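/*
 * Map the (physically contiguous) buffer into an IOMMU domain through
 * a one-entry scatterlist.  Any @iova_length beyond the buffer size is
 * backed via msm_iommu_map_extra().  Without an IOMMU, the physical
 * address is handed back directly.
 */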
int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sglist->length = buffer->size;
	sglist->offset = 0;
	sglist->dma_address = buffer->priv_phys;

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

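/*
 * Tear down a mapping created by ion_carveout_heap_map_iommu() and
 * return the iova range to its partition.
 */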
void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_user = ion_carveout_heap_map_user,
	.map_kernel = ion_carveout_heap_map_kernel,
	.unmap_user = ion_carveout_heap_unmap_user,
	.unmap_kernel = ion_carveout_heap_unmap_kernel,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.cache_op = ion_carveout_cache_ops,
	.print_debug = ion_carveout_print_debug,
	.map_iommu = ion_carveout_heap_map_iommu,
	.unmap_iommu = ion_carveout_heap_unmap_iommu,
};

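/*
 * Create a carveout heap over [heap_data->base, base + size).  The
 * backing gen_pool uses a 4 KB (2^12) minimum allocation order.  MSM
 * extra_data may supply setup/request/release region hooks.
 */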
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
			heap_data->size, -1);
	if (ret < 0) {
		gen_pool_destroy(carveout_heap->pool);
		kfree(carveout_heap);
		return ERR_PTR(-EINVAL);
	}
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->allocated_bytes = 0;
	carveout_heap->total_size = heap_data->size;
	carveout_heap->has_outer_cache = heap_data->has_outer_cache;

	if (heap_data->extra_data) {
		struct ion_co_heap_pdata *extra_data =
				heap_data->extra_data;

		if (extra_data->setup_region)
			carveout_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			carveout_heap->request_region =
					extra_data->request_region;
		if (extra_data->release_region)
			carveout_heap->release_region =
					extra_data->release_region;
	}
	return &carveout_heap->heap;
}

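/*
 * A minimal usage sketch, not part of this file: board code is assumed
 * to reserve the physical range at boot and describe it with a
 * struct ion_platform_heap before calling ion_carveout_heap_create().
 * All names and values below are illustrative.
 *
 *	static struct ion_platform_heap example_heap_data = {
 *		.type = ION_HEAP_TYPE_CARVEOUT,
 *		.name = "example_carveout",
 *		.base = 0x40000000,	// example reserved physical base
 *		.size = SZ_8M,		// example reserved size
 *	};
 *
 *	struct ion_heap *heap = ion_carveout_heap_create(&example_heap_data);
 *	if (IS_ERR(heap))
 *		return PTR_ERR(heap);	// -ENOMEM or -EINVAL (see above)
 */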
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
	carveout_heap = NULL;
}