/*
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

/*
 * We will attempt to allocate high-order pages and store those in an
 * sg_list. However, some APIs expect an array of struct page * where
 * each page is of size PAGE_SIZE. We use this extra structure to
 * carry around an array of such pages (derived from the high-order
 * pages with nth_page).
 */
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

#define MAX_VMAP_RETRIES 10

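/*
 * Allocation orders to try, largest first. With 4K pages these
 * correspond to 1MB, 64KB and 4KB chunks.
 */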
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

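/*
 * Allocate the largest chunk from the orders[] list that is no bigger
 * than 'size' and no higher than 'max_order'. Returns a page_info
 * describing the chunk, or NULL if every order fails.
 */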
static struct page_info *alloc_largest_available(unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		gfp_t gfp;
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		gfp = __GFP_HIGHMEM;

		if (orders[i]) {
			gfp |= __GFP_COMP | __GFP_NORETRY |
			       __GFP_NO_KSWAPD | __GFP_NOWARN;
		} else {
			gfp |= GFP_KERNEL;
		}
		page = alloc_pages(gfp, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

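/*
 * Allocate buffer memory for the IOMMU heap: grab chunks of decreasing
 * order until 'size' is covered, build the sg_table and the flat 4K
 * page array, zero everything via a chunked vmap, and sync the cache
 * for uncached buffers.
 */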
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct list_head pages_list;
	struct page_info *info, *tmp_info;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		int j;
		void *ptr = NULL;
		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
		long size_remaining = PAGE_ALIGN(size);
		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		INIT_LIST_HEAD(&pages_list);
		while (size_remaining > 0) {
			info = alloc_largest_available(size_remaining,
						       max_order);
			if (!info) {
				ret = -ENOMEM;
				goto err_free_data;
			}
			list_add_tail(&info->list, &pages_list);
			size_remaining -= order_to_size(info->order);
			max_order = info->order;
			num_large_pages++;
		}

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *)*data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err_free_data;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, num_large_pages, GFP_KERNEL);
		if (ret)
			goto err2;

		i = 0;
		sg = table->sgl;
		list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
			struct page *page = info->page;
			sg_set_page(sg, page, order_to_size(info->order), 0);
			sg_dma_address(sg) = sg_phys(sg);
			sg = sg_next(sg);
			for (j = 0; j < (1 << info->order); ++j)
				data->pages[i++] = nth_page(page, j);
			list_del(&info->list);
			kfree(info);
		}

		/*
		 * As an optimization, we omit __GFP_ZERO from
		 * alloc_page above and manually zero out all of the
		 * pages in one fell swoop here. To safeguard against
		 * insufficient vmalloc space, we only vmap
		 * `npages_to_vmap' at a time, starting with a
		 * conservative estimate of 1/8 of the total number of
		 * vmalloc pages available. Note that the `pages'
		 * array is composed of all 4K pages, irrespective of
		 * the size of the pages on the sg list.
		 */
		npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8)
			>> PAGE_SHIFT;
		total_pages = data->nrpages;
		for (i = 0; i < total_pages; i += npages_to_vmap) {
			npages_to_vmap = min(npages_to_vmap, total_pages - i);
			for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap;
			     ++j) {
				ptr = vmap(&data->pages[i], npages_to_vmap,
					   VM_IOREMAP, pgprot_kernel);
				if (ptr)
					break;
				else
					npages_to_vmap >>= 1;
			}
			if (!ptr) {
				pr_err("Couldn't vmap the pages for zeroing\n");
				ret = -ENOMEM;
				goto err3;
			}
			memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
			vunmap(ptr);
		}

		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
err1:
	kfree(data->pages);
err_free_data:
	kfree(data);

	list_for_each_entry_safe(info, tmp_info, &pages_list, list) {
		if (info->page)
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return ret;
}

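/*
 * Release everything set up by ion_iommu_heap_allocate(): the
 * high-order pages referenced by the sg_table, the table itself and
 * the private page array.
 */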
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->sg_table;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!table)
		return;
	if (!data)
		return;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));

	sg_free_table(table);
	kfree(table);
	kfree(data->pages);
	kfree(data);
}

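/*
 * Map the whole buffer into the kernel with vmap(), write-combined
 * unless the buffer was allocated cached.
 */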
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_writecombine(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

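/*
 * Map the buffer into a userspace VMA, walking the sg list one entry
 * at a time and honouring vma->vm_pgoff as a byte offset into the
 * buffer.
 */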
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

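/*
 * Map the buffer into an IOMMU domain/partition: reserve an IOVA
 * range of 'iova_length' bytes, map the sg list into it, and hand any
 * extra length beyond the buffer size to msm_iommu_map_extra().
 */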
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	/* Use the biggest alignment to allow bigger IOMMU mappings.
	 * Use the first entry since the first entry will always be the
	 * biggest entry. To take advantage of bigger mapping sizes both the
	 * VA and PA addresses have to be aligned to the biggest size.
	 */
	if (buffer->sg_table->sgl->length > align)
		align = buffer->sg_table->sgl->length;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      buffer->size);

out:

	return ret;
}

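/*
 * Tear down a mapping created by ion_iommu_heap_map_iommu(): unmap
 * the IOVA range and return it to the domain's address allocator.
 */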
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

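/*
 * Perform clean/invalidate/flush cache maintenance on the buffer. If
 * no kernel vaddr is given, the sg list is synced via the DMA API;
 * otherwise the virtual range is maintained directly. The outer
 * cache, if present, is walked page by page.
 */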
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;
		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

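/* The sg_table is built at allocation time, so map_dma simply returns it. */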
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

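/* Create an IOMMU heap instance from the platform heap description. */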
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}