/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>
#include <linux/dma-mapping.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
static unsigned int system_heap_has_outer_cache;
static unsigned int system_heap_contig_has_outer_cache;

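/*
 * System heap: the buffer is built from individual order-0 pages tracked
 * in an sg_table, so the backing memory never needs to be physically
 * contiguous.  The atomic counters above feed the debugfs statistics
 * reported by the print_debug callbacks below.
 */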
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct sg_table *table;
        struct scatterlist *sg;
        int i, j;
        int npages = PAGE_ALIGN(size) / PAGE_SIZE;

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
        i = sg_alloc_table(table, npages, GFP_KERNEL);
        if (i)
                goto err0;
        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page;
                page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!page)
                        goto err1;
                sg_set_page(sg, page, PAGE_SIZE, 0);
        }
        buffer->priv_virt = table;
        atomic_add(size, &system_heap_allocated);
        return 0;
err1:
        for_each_sg(table->sgl, sg, i, j)
                __free_page(sg_page(sg));
        sg_free_table(table);
err0:
        kfree(table);
        return -ENOMEM;
}

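/*
 * Frees every page referenced by the buffer's sg_table.  Freeing
 * buffer->sg_table also releases the table allocated above, on the
 * assumption that the Ion core set buffer->sg_table to the table
 * returned by ion_system_heap_map_dma() (i.e. priv_virt).
 */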
void ion_system_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->priv_virt;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_page(sg_page(sg));
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
        atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

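/*
 * Kernel mapping relies on vmap(): the scatterlist pages are gathered
 * into a temporary page array and stitched into a virtually contiguous
 * range.  Uncached kernel mappings are not supported by this heap.
 */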
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!ION_IS_CACHED(buffer->flags)) {
                pr_err("%s: cannot map system heap uncached\n", __func__);
                return ERR_PTR(-EINVAL);
        } else {
                struct scatterlist *sg;
                int i;
                void *vaddr;
                struct sg_table *table = buffer->priv_virt;
                struct page **pages = kmalloc(
                                sizeof(struct page *) * table->nents,
                                GFP_KERNEL);

                /* vmap() needs a flat page array; fail cleanly if we
                 * cannot allocate one. */
                if (!pages)
                        return ERR_PTR(-ENOMEM);

                for_each_sg(table->sgl, sg, table->nents, i)
                        pages[i] = sg_page(sg);
                vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
                kfree(pages);

                return vaddr;
        }
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

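/*
 * Tears down an IOMMU mapping created by the map_iommu callbacks below:
 * the range is unmapped from the MSM IOMMU domain and the iova window
 * is returned to the domain/partition allocator.
 */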
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        if (!msm_use_iommu())
                return;

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
}

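/*
 * Userspace mapping inserts each page individually with
 * vm_insert_page(), skipping leading pages to honour vma->vm_pgoff.
 * As with kernel mappings, only cached mappings are supported.
 */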
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        if (!ION_IS_CACHED(buffer->flags)) {
                pr_err("%s: cannot map system heap uncached\n", __func__);
                return -EINVAL;
        } else {
                struct sg_table *table = buffer->priv_virt;
                unsigned long addr = vma->vm_start;
                unsigned long offset = vma->vm_pgoff;
                struct scatterlist *sg;
                int i;

                for_each_sg(table->sgl, sg, table->nents, i) {
                        if (offset) {
                                offset--;
                                continue;
                        }
                        vm_insert_page(vma, addr, sg_page(sg));
                        addr += PAGE_SIZE;
                }
                return 0;
        }
}

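/*
 * Cache maintenance: a NULL vaddr means no kernel mapping exists, in
 * which case the dma_sync_sg_* helpers walk the sg_table instead of the
 * dmac_* range operations.  Any outer (L2) cache is then maintained
 * page by page using the physical address of each scatterlist entry.
 */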
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                else
                        dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                else
                        dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                if (!vaddr) {
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                } else {
                        dmac_flush_range(vaddr, vaddr + length);
                }
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (system_heap_has_outer_cache) {
                unsigned long pstart;
                struct sg_table *table = buffer->priv_virt;
                struct scatterlist *sg;
                int i;

                for_each_sg(table->sgl, sg, table->nents, i) {
                        struct page *page = sg_page(sg);
                        pstart = page_to_phys(page);
                        /*
                         * If page -> phys is returning NULL, something
                         * has really gone wrong...
                         */
                        if (!pstart) {
                                WARN(1, "Could not translate virtual address to physical address\n");
                                return -EINVAL;
                        }
                        outer_cache_op(pstart, pstart + PAGE_SIZE);
                }
        }
        return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
                                  const struct rb_root *unused)
{
        seq_printf(s, "total bytes currently allocated: %lx\n",
                   (unsigned long) atomic_read(&system_heap_allocated));

        return 0;
}

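/*
 * Maps the buffer into an MSM IOMMU domain/partition.  When the
 * requested iova_length exceeds the buffer size, the leftover window is
 * handed to msm_iommu_map_extra() along with the buffer's first
 * physical page (sg_phys(table->sgl)), which presumably backs the extra
 * range at SZ_4K granularity rather than allocating new memory.
 */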
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
                              struct ion_iommu_map *data,
                              unsigned int domain_num,
                              unsigned int partition_num,
                              unsigned long align,
                              unsigned long iova_length,
                              unsigned long flags)
{
        int ret = 0;
        struct iommu_domain *domain;
        unsigned long extra;
        unsigned long extra_iova_addr;
        struct sg_table *table = buffer->priv_virt;
        int prot = IOMMU_WRITE | IOMMU_READ;
        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        if (!ION_IS_CACHED(flags))
                return -EINVAL;

        if (!msm_use_iommu())
                return -EINVAL;

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr, table->sgl,
                              buffer->size, prot);

        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        extra_iova_addr = data->iova_addr + buffer->size;
        if (extra) {
                unsigned long phys_addr = sg_phys(table->sgl);
                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
out:
        return ret;
}

static struct ion_heap_ops vmalloc_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
        .cache_op = ion_system_heap_cache_ops,
        .print_debug = ion_system_print_debug,
        .map_iommu = ion_system_heap_map_iommu,
        .unmap_iommu = ion_system_heap_unmap_iommu,
};

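/*
 * Heap constructors: both system heaps latch the platform's
 * has_outer_cache flag into the file-scope globals consumed by the
 * cache_op callbacks above.
 */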
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &vmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM;
        system_heap_has_outer_cache = pheap->has_outer_cache;
        return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}

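/*
 * System contig heap: the whole buffer comes from a single kzalloc(),
 * so it is physically contiguous and can report a physical address via
 * the phys callback.
 */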
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        atomic_add(len, &system_contig_heap_allocated);
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
        atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        if (ION_IS_CACHED(buffer->flags))
                return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot);
        else {
                pr_err("%s: cannot map system heap uncached\n", __func__);
                return -EINVAL;
        }
}

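/*
 * Cache maintenance for the contiguous heap.  A kernel mapping always
 * exists here, so the dmac_* range operations are used directly.  Note
 * that the outer-cache pass below covers only one PAGE_SIZE window
 * starting at 'offset', regardless of 'length'.
 */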
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
                        struct ion_buffer *buffer, void *vaddr,
                        unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                dmac_flush_range(vaddr, vaddr + length);
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (system_heap_contig_has_outer_cache) {
                unsigned long pstart;

                pstart = virt_to_phys(buffer->priv_virt) + offset;
                if (!pstart) {
                        WARN(1, "Could not do virt to phys translation on %p\n",
                                buffer->priv_virt);
                        return -EINVAL;
                }

                outer_cache_op(pstart, pstart + PAGE_SIZE);
        }

        return 0;
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
                                         struct seq_file *s,
                                         const struct rb_root *unused)
{
        seq_printf(s, "total bytes currently allocated: %lx\n",
                (unsigned long) atomic_read(&system_contig_heap_allocated));

        return 0;
}

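/*
 * IOMMU mapping for the contiguous heap.  Without an IOMMU the physical
 * address doubles as the "iova".  Otherwise a one-entry scatterlist is
 * built around the kzalloc'd region and mapped, with any extra iova
 * window handled the same way as in ion_system_heap_map_iommu().
 */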
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
                                     struct ion_iommu_map *data,
                                     unsigned int domain_num,
                                     unsigned int partition_num,
                                     unsigned long align,
                                     unsigned long iova_length,
                                     unsigned long flags)
{
        int ret = 0;
        struct iommu_domain *domain;
        unsigned long extra;
        struct scatterlist *sglist = NULL;
        struct page *page = NULL;
        int prot = IOMMU_WRITE | IOMMU_READ;
        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        if (!ION_IS_CACHED(flags))
                return -EINVAL;

        if (!msm_use_iommu()) {
                data->iova_addr = virt_to_phys(buffer->vaddr);
                return 0;
        }

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }
        page = virt_to_page(buffer->vaddr);

        sglist = vmalloc(sizeof(*sglist));
        if (!sglist) {
                /* report allocation failure rather than success */
                ret = -ENOMEM;
                goto out1;
        }

        sg_init_table(sglist, 1);
        sg_set_page(sglist, page, buffer->size, 0);

        ret = iommu_map_range(domain, data->iova_addr, sglist,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                unsigned long phys_addr = sg_phys(sglist);
                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        vfree(sglist);
        return ret;
out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);

out1:
        vfree(sglist);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
out:
        return ret;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_contig_heap_map_kernel,
        .unmap_kernel = ion_system_contig_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
        .cache_op = ion_system_contig_heap_cache_ops,
        .print_debug = ion_system_contig_print_debug,
        .map_iommu = ion_system_contig_heap_map_iommu,
        .unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        system_heap_contig_has_outer_cache = pheap->has_outer_cache;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}