/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
static unsigned int system_heap_has_outer_cache;
static unsigned int system_heap_contig_has_outer_cache;

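/*
 * Allocate a system heap buffer: build an sg_table with one zeroed
 * order-0 page per entry, so the backing store need not be
 * physically contiguous. On partial failure, the pages allocated so
 * far are released before the table is unwound.
 */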
static int ion_system_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long size, unsigned long align,
				     unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i, j;
	int npages = PAGE_ALIGN(size) / PAGE_SIZE;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	i = sg_alloc_table(table, npages, GFP_KERNEL);
	if (i)
		goto err0;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page;
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto err1;
		sg_set_page(sg, page, PAGE_SIZE, 0);
	}
	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	for_each_sg(table->sgl, sg, i, j)
		__free_page(sg_page(sg));
	sg_free_table(table);
err0:
	kfree(table);
	return -ENOMEM;
}

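/*
 * Free every page referenced by the buffer's sg_table, then release
 * the table. map_dma() below hands out priv_virt, so buffer->sg_table
 * normally aliases the same table and is used for the teardown.
 */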
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_page(sg_page(sg));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

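/*
 * The buffer is already described by an sg_table, so the DMA "map"
 * is simply handing that table back, and the unmap is a no-op.
 */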
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

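/*
 * Build a kernel virtual mapping of the pages with vmap(). Uncached
 * mappings are refused: the pages already have a cached linear
 * mapping, and an uncached alias would give conflicting memory
 * attributes on ARM.
 */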
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return ERR_PTR(-EINVAL);
	} else {
		struct scatterlist *sg;
		int i;
		void *vaddr;
		struct sg_table *table = buffer->priv_virt;
		struct page **pages = kmalloc(
				sizeof(struct page *) * table->nents,
				GFP_KERNEL);

		if (!pages)
			return ERR_PTR(-ENOMEM);

		for_each_sg(table->sgl, sg, table->nents, i)
			pages[i] = sg_page(sg);
		vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
		kfree(pages);

		return vaddr;
	}
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

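/*
 * Undo a mapping created by the map_iommu() callbacks below: unmap
 * the whole mapped range from the domain, then return the iova range
 * to its partition.
 */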
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

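/*
 * Map the buffer into a user vma one page at a time with
 * vm_insert_page(), skipping vm_pgoff pages to honour the mmap
 * offset. As with kernel mappings, only cached mappings are allowed.
 */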
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	} else {
		struct sg_table *table = buffer->priv_virt;
		unsigned long addr = vma->vm_start;
		unsigned long offset = vma->vm_pgoff;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			if (offset) {
				offset--;
				continue;
			}
			vm_insert_page(vma, addr, sg_page(sg));
			addr += PAGE_SIZE;
		}
		return 0;
	}
}

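/*
 * Perform the cache maintenance requested by the ioctl. The inner
 * cache is handled via the dmac_* helpers on the kernel mapping; any
 * outer (L2) cache is then walked page by page, since the buffer is
 * not physically contiguous.
 */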
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			      void *vaddr, unsigned int offset,
			      unsigned int length, unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		struct sg_table *table = buffer->priv_virt;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate virtual address to physical address\n");
				return -EINVAL;
			}
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

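/*
 * Map the discontiguous buffer into an IOMMU domain: reserve an iova
 * range of iova_length bytes, map the sg_table into it, and pad any
 * extra length beyond buffer->size via msm_iommu_map_extra() (which
 * here is passed the buffer's first physical page) so the whole
 * range is backed.
 */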
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
			      struct ion_iommu_map *data,
			      unsigned int domain_num,
			      unsigned int partition_num,
			      unsigned long align,
			      unsigned long iova_length,
			      unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct sg_table *table = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.cache_op = ion_system_heap_cache_ops,
	.print_debug = ion_system_print_debug,
	.map_iommu = ion_system_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	system_heap_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

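/*
 * The contiguous variant backs each buffer with a single kzalloc()
 * allocation, which is why it can also implement the phys() callback
 * directly below.
 */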
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

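/*
 * Unlike the non-contiguous heap, the DMA map here allocates a fresh
 * single-entry sg_table describing the kmalloc'd region.
 */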
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags)) {
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	} else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

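/*
 * Same maintenance scheme as the non-contiguous heap, except the
 * outer cache can be handled with a single range operation because
 * the buffer is physically contiguous.
 */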
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
				     struct ion_buffer *buffer, void *vaddr,
				     unsigned int offset, unsigned int length,
				     unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
			     buffer->priv_virt);
			return -EINVAL;
		}

		/* The buffer is contiguous, so cover the whole range. */
		outer_cache_op(pstart, pstart + length);
	}

	return 0;
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

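/*
 * IOMMU mapping for the contiguous heap. Without an IOMMU the
 * physical address is used as the "iova" directly. Otherwise a
 * one-entry scatterlist is built on the fly, mapped into the domain,
 * and any extra iova length is padded as in the non-contiguous case.
 */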
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				     struct ion_iommu_map *data,
				     unsigned int domain_num,
				     unsigned int partition_num,
				     unsigned long align,
				     unsigned long iova_length,
				     unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.cache_op = ion_system_contig_heap_cache_ops,
	.print_debug = ion_system_contig_print_debug,
	.map_iommu = ion_system_contig_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}