/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
#include <linux/dma-mapping.h>

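/* Running allocation totals for the two heaps, reported via print_debug. */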
static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
static unsigned int system_heap_has_outer_cache;
static unsigned int system_heap_contig_has_outer_cache;

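/*
 * Build the buffer as a table of discrete order-0 pages: allocate one
 * zeroed page per scatterlist entry, unwinding everything on failure.
 */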
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i, j;
	int npages = PAGE_ALIGN(size) / PAGE_SIZE;

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	i = sg_alloc_table(table, npages, GFP_KERNEL);
	if (i)
		goto err0;
	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page;
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto err1;
		sg_set_page(sg, page, PAGE_SIZE, 0);
	}
	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	for_each_sg(table->sgl, sg, i, j)
		__free_page(sg_page(sg));
	sg_free_table(table);
err0:
	kfree(table);
	return -ENOMEM;
}

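/*
 * Release every page in the table. buffer->sg_table is the same table as
 * priv_virt here, since map_dma below hands priv_virt back to the ion core.
 */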
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_page(sg_page(sg));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

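/*
 * Stitch the individual pages into one contiguous kernel virtual range
 * with vmap(). This heap only supports cached mappings.
 */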
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return ERR_PTR(-EINVAL);
	} else {
		struct scatterlist *sg;
		int i;
		void *vaddr;
		struct sg_table *table = buffer->priv_virt;
		struct page **pages = kmalloc(
					sizeof(struct page *) * table->nents,
					GFP_KERNEL);

		if (!pages)
			return ERR_PTR(-ENOMEM);

		for_each_sg(table->sgl, sg, table->nents, i)
			pages[i] = sg_page(sg);
		vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
		kfree(pages);

		return vaddr;
	}
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

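/* Tear down an IOMMU mapping and return its iova range to the pool. */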
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

	return;
}

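/*
 * Fault the pages into userspace one at a time with vm_insert_page(),
 * honouring the mmap offset by skipping vm_pgoff pages first.
 */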
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	} else {
		struct sg_table *table = buffer->priv_virt;
		unsigned long addr = vma->vm_start;
		unsigned long offset = vma->vm_pgoff;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			if (offset) {
				offset--;
				continue;
			}
			vm_insert_page(vma, addr, sg_page(sg));
			addr += PAGE_SIZE;
		}
		return 0;
	}
}

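/*
 * Clean/invalidate the inner cache by kernel vaddr when one is supplied,
 * otherwise via the DMA API on the sg list; the outer cache (if present)
 * is then walked page by page below.
 */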
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		struct sg_table *table = buffer->priv_virt;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate virtual address to physical address\n");
				return -EINVAL;
			}
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		   (unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

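/*
 * Map the buffer into an IOMMU domain/partition. Any extra iova length
 * beyond the buffer size is handled by msm_iommu_map_extra() at SZ_4K
 * granularity.
 */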
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct sg_table *table = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

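/* Ops for ION_HEAP_TYPE_SYSTEM: discontiguous pages allocated per buffer. */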
static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.cache_op = ion_system_heap_cache_ops,
	.print_debug = ion_system_print_debug,
	.map_iommu = ion_system_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	system_heap_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

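/*
 * The contiguous variant: the whole buffer is a single kzalloc()
 * allocation, so it is physically contiguous and keeps a kernel vaddr
 * for its entire lifetime.
 */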
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

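/*
 * One remap_pfn_range() call suffices here since the buffer is
 * physically contiguous; only cached mappings are allowed.
 */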
int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags))
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

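/*
 * Same cache maintenance commands as the system heap, but the physical
 * range can be computed directly from the contiguous buffer.
 */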
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
				buffer->priv_virt);
			return -EINVAL;
		}

		/* Cover the requested range, not just the first page. */
		outer_cache_op(pstart, pstart + length);
	}

	return 0;
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		(unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

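/*
 * IOMMU mapping for the contiguous heap: a throwaway one-entry sg list
 * describes the buffer. Without an IOMMU, the physical address is
 * simply used as the "iova".
 */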
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);
		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return;
}

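/* Ops for ION_HEAP_TYPE_SYSTEM_CONTIG, backed by kzalloc(). */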
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.cache_op = ion_system_contig_heap_cache_ops,
	.print_debug = ion_system_contig_print_debug,
	.map_iommu = ion_system_contig_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}