/*
 * drivers/gpu/ion/ion_carveout_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/spinlock.h>

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include "ion_priv.h"

#include <mach/iommu_domains.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>

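/*
 * Per-heap bookkeeping for a physically contiguous carveout region.
 * Allocations come from a gen_pool covering [base, base + total_size);
 * map_count reference-counts the optional request_region()/release_region()
 * callbacks, which are handed the opaque bus_id cookie.
 */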
struct ion_carveout_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*request_region)(void *);
	int (*release_region)(void *);
	atomic_t map_count;
	void *bus_id;
	unsigned int has_outer_cache;
};

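/*
 * Carve a physically contiguous block of @size bytes out of the heap's
 * gen_pool, aligned to @align.  Returns the physical address on success or
 * ION_CARVEOUT_ALLOCATE_FAIL when the pool cannot satisfy the request
 * (usually exhaustion or fragmentation); ion_carveout_free() returns the
 * block to the pool.
 */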
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	unsigned long offset = gen_pool_alloc_aligned(carveout_heap->pool,
							size, ilog2(align));

	if (!offset) {
		if ((carveout_heap->total_size -
		     carveout_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.",
				__func__, heap->name,
				carveout_heap->total_size -
				carveout_heap->allocated_bytes, size);
		return ION_CARVEOUT_ALLOCATE_FAIL;
	}

	carveout_heap->allocated_bytes += size;
	return offset;
}

void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
		return;
	gen_pool_free(carveout_heap->pool, addr, size);
	carveout_heap->allocated_bytes -= size;
}

static int ion_carveout_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*addr = buffer->priv_phys;
	*len = buffer->size;
	return 0;
}

static int ion_carveout_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	buffer->priv_phys = ion_carveout_allocate(heap, size, align);
	return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
}

static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;

	ion_carveout_free(heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}

struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	struct scatterlist *sglist;

	sglist = vmalloc(sizeof(struct scatterlist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);

	sg_init_table(sglist, 1);
	sglist->length = buffer->size;
	sglist->offset = 0;
	sglist->dma_address = buffer->priv_phys;

	return sglist;
}

void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (buffer->sglist)
		vfree(buffer->sglist);
}

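/*
 * The first mapping of any buffer in this heap calls request_region() and
 * the last unmapping calls release_region(); map_count keeps the two
 * balanced across concurrent kernel and userspace mappings.
 */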
static int ion_carveout_request_region(struct ion_carveout_heap *carveout_heap)
{
	int ret_value = 0;

	if (atomic_inc_return(&carveout_heap->map_count) == 1) {
		if (carveout_heap->request_region) {
			ret_value = carveout_heap->request_region(
						carveout_heap->bus_id);
			if (ret_value) {
				pr_err("Unable to request SMI region");
				atomic_dec(&carveout_heap->map_count);
			}
		}
	}
	return ret_value;
}

static int ion_carveout_release_region(struct ion_carveout_heap *carveout_heap)
{
	int ret_value = 0;

	if (atomic_dec_and_test(&carveout_heap->map_count)) {
		if (carveout_heap->release_region) {
			ret_value = carveout_heap->release_region(
						carveout_heap->bus_id);
			if (ret_value)
				pr_err("Unable to release SMI region");
		}
	}
	return ret_value;
}

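/*
 * Kernel mappings are plain ioremap()s of the buffer's physical range,
 * cached or uncached depending on ION_IS_CACHED(flags).  On ioremap
 * failure the region reference taken above is dropped again.
 */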
void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long flags)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	void *ret_value;

	if (ion_carveout_request_region(carveout_heap))
		return NULL;

	if (ION_IS_CACHED(flags))
		ret_value = ioremap_cached(buffer->priv_phys, buffer->size);
	else
		ret_value = ioremap(buffer->priv_phys, buffer->size);

	if (!ret_value)
		ion_carveout_release_region(carveout_heap);
	return ret_value;
}

void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	__arch_iounmap(buffer->vaddr);
	buffer->vaddr = NULL;

	ion_carveout_release_region(carveout_heap);
	return;
}

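/*
 * Userspace mappings are created with remap_pfn_range() straight over the
 * carveout's physical pages; uncached requests are mapped write-combined.
 */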
int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			       struct vm_area_struct *vma, unsigned long flags)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);
	int ret_value = 0;

	if (ion_carveout_request_region(carveout_heap))
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret_value = remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

	if (ret_value)
		ion_carveout_release_region(carveout_heap);
	return ret_value;
}

void ion_carveout_heap_unmap_user(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	ion_carveout_release_region(carveout_heap);
}

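/*
 * Clean, invalidate or flush the CPU caches for a sub-range of the buffer:
 * inner (L1) maintenance works on the kernel virtual address, the outer
 * cache operation (when present) works on the physical range.
 */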
int ion_carveout_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (carveout_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_cache_op(pstart, pstart + length);
	}
	return 0;
}

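/*
 * debugfs helper: dump the allocated/total byte counts and, when a memory
 * map is supplied, a table of client allocations and the free gaps
 * between them.
 */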
static int ion_carveout_print_debug(struct ion_heap *heap, struct seq_file *s,
				    const struct rb_root *mem_map)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	seq_printf(s, "total bytes currently allocated: %lx\n",
		carveout_heap->allocated_bytes);
	seq_printf(s, "total heap size: %lx\n", carveout_heap->total_size);

	if (mem_map) {
		unsigned long base = carveout_heap->base;
		unsigned long size = carveout_heap->total_size;
		unsigned long end = base + size;
		unsigned long last_end = base;
		struct rb_node *n;

		seq_printf(s, "\nMemory Map\n");
		seq_printf(s, "%16s %14s %14s %14s\n",
			   "client", "start address", "end address",
			   "size (hex)");

		for (n = rb_first(mem_map); n; n = rb_next(n)) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			const char *client_name = "(null)";

			if (last_end < data->addr) {
				seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
					   "FREE", last_end, data->addr - 1,
					   data->addr - last_end,
					   data->addr - last_end);
			}

			if (data->client_name)
				client_name = data->client_name;

			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n",
				   client_name, data->addr,
				   data->addr_end,
				   data->size, data->size);
			last_end = data->addr_end + 1;
		}
		if (last_end < end) {
			seq_printf(s, "%16s %14lx %14lx %14lu (%lx)\n", "FREE",
				last_end, end - 1, end - last_end,
				end - last_end);
		}
	}
	return 0;
}

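/*
 * Map the carveout buffer, plus any extra page-aligned padding implied by
 * @iova_length, into an IOMMU domain.  A one-entry scatterlist is built on
 * the fly because the buffer is physically contiguous.
 */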
int ion_carveout_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sglist->length = buffer->size;
	sglist->offset = 0;
	sglist->dma_address = buffer->priv_phys;

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void ion_carveout_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

	return;
}

static struct ion_heap_ops carveout_heap_ops = {
	.allocate = ion_carveout_heap_allocate,
	.free = ion_carveout_heap_free,
	.phys = ion_carveout_heap_phys,
	.map_user = ion_carveout_heap_map_user,
	.map_kernel = ion_carveout_heap_map_kernel,
	.unmap_user = ion_carveout_heap_unmap_user,
	.unmap_kernel = ion_carveout_heap_unmap_kernel,
	.map_dma = ion_carveout_heap_map_dma,
	.unmap_dma = ion_carveout_heap_unmap_dma,
	.cache_op = ion_carveout_cache_ops,
	.print_debug = ion_carveout_print_debug,
	.map_iommu = ion_carveout_heap_map_iommu,
	.unmap_iommu = ion_carveout_heap_unmap_iommu,
};

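/*
 * Create a carveout heap over the region described by @heap_data.  The
 * backing gen_pool uses an order-12 (4 KiB) minimum allocation granularity.
 *
 * Illustrative board-file registration, shown only as a sketch: the field
 * values and the my_*() callbacks below are assumptions for illustration,
 * not taken from this file.
 *
 *	static struct ion_co_heap_pdata co_pdata = {
 *		.setup_region	= my_setup_region,
 *		.request_region	= my_request_region,
 *		.release_region	= my_release_region,
 *	};
 *
 *	static struct ion_platform_heap heap_data = {
 *		.type		= ION_HEAP_TYPE_CARVEOUT,
 *		.base		= 0x80000000,
 *		.size		= SZ_8M,
 *		.extra_data	= &co_pdata,
 *	};
 *
 *	struct ion_heap *heap = ion_carveout_heap_create(&heap_data);
 */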
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_carveout_heap *carveout_heap;
	int ret;

	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
	if (!carveout_heap)
		return ERR_PTR(-ENOMEM);

	carveout_heap->pool = gen_pool_create(12, -1);
	if (!carveout_heap->pool) {
		kfree(carveout_heap);
		return ERR_PTR(-ENOMEM);
	}
	carveout_heap->base = heap_data->base;
	ret = gen_pool_add(carveout_heap->pool, carveout_heap->base,
			   heap_data->size, -1);
	if (ret < 0) {
		gen_pool_destroy(carveout_heap->pool);
		kfree(carveout_heap);
		return ERR_PTR(-EINVAL);
	}
	carveout_heap->heap.ops = &carveout_heap_ops;
	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
	carveout_heap->allocated_bytes = 0;
	carveout_heap->total_size = heap_data->size;
	carveout_heap->has_outer_cache = heap_data->has_outer_cache;

	if (heap_data->extra_data) {
		struct ion_co_heap_pdata *extra_data =
				heap_data->extra_data;

		if (extra_data->setup_region)
			carveout_heap->bus_id = extra_data->setup_region();
		if (extra_data->request_region)
			carveout_heap->request_region =
					extra_data->request_region;
		if (extra_data->release_region)
			carveout_heap->release_region =
					extra_data->release_region;
	}
	return &carveout_heap->heap;
}

void ion_carveout_heap_destroy(struct ion_heap *heap)
{
	struct ion_carveout_heap *carveout_heap =
		container_of(heap, struct ion_carveout_heap, heap);

	gen_pool_destroy(carveout_heap->pool);
	kfree(carveout_heap);
	carveout_heap = NULL;
}