/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <mach/memory.h>
#include <asm/cacheflush.h>
#include <linux/msm_ion.h>

static atomic_t system_heap_allocated;
static atomic_t system_contig_heap_allocated;
static unsigned int system_heap_has_outer_cache;
static unsigned int system_heap_contig_has_outer_cache;

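/*
 * Bookkeeping for one physically contiguous chunk handed back by
 * alloc_largest_available().  The structure itself lives inside the chunk
 * it describes: the allocator kmap()s the chunk's first page and stores
 * the page_info there, so no separate metadata allocation is needed.
 */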
struct page_info {
	struct page *page;
	unsigned long order;
	struct list_head list;
};

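/*
 * Grab the largest chunk (order 8, 4 or 0) that does not overshoot the
 * remaining size.  Higher orders are tried first so large buffers end up
 * with few, big scatterlist entries; __GFP_NORETRY and __GFP_NOWARN keep
 * the fallback to smaller orders cheap and quiet.  When the buffer will be
 * faulted into user mappings page by page, the compound allocation is
 * split into individual pages up front.
 */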
static struct page_info *alloc_largest_available(unsigned long size,
						 bool split_pages)
{
	static unsigned int orders[] = {8, 4, 0};
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (1 << orders[i]) * PAGE_SIZE)
			continue;
		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
		if (!page)
			continue;
		if (split_pages)
			split_page(page, orders[i]);
		info = kmap(page);
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

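/*
 * Build the buffer from variable-order chunks: keep calling
 * alloc_largest_available() until the remaining size reaches zero, then
 * convert the chunk list into an sg_table.  With split_pages the table
 * gets one entry per PAGE_SIZE page, otherwise one entry per chunk.  A
 * final dma_sync_sg_for_device() flushes the freshly zeroed pages out of
 * the CPU caches before the table is stashed in buffer->priv_virt.
 */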
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(size_remaining, split_pages);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);

	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kunmap(page);
	}

	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	atomic_add(size, &system_heap_allocated);
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry(info, &pages, list) {
		if (split_pages)
			for (i = 0; i < (1 << info->order); i++)
				__free_page(info->page + i);
		else
			__free_pages(info->page, info->order);

		kunmap(info->page);
	}
	return -ENOMEM;
}

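/*
 * Free path: each scatterlist entry holds either a single page or a whole
 * higher-order chunk, so the order to free is recovered from the entry
 * length.  ion_system_heap_map_dma() below hands out the priv_virt table
 * directly, so releasing buffer->sg_table here releases the table that was
 * built in the allocate path.
 */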
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	atomic_sub(buffer->size, &system_heap_allocated);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	return;
}

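/*
 * Kernel mapping: the buffer is physically discontiguous, so collect every
 * page referenced by the sg_table into a flat array and vmap() it.  Cached
 * buffers keep PAGE_KERNEL; everything else is mapped write-combined.
 */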
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = kzalloc(sizeof(struct page *) * npages,
				      GFP_KERNEL);
	struct page **tmp = pages;

	if (!pages)
		return NULL;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	kfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

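/*
 * Tear down an IOMMU mapping created by the map_iommu callbacks below:
 * unmap the whole mapped_size range from the MSM domain and hand the iova
 * region back to the partition allocator.
 */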
void ion_system_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

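/*
 * Userspace mapping: walk the sg_table and remap each chunk into the vma
 * with remap_pfn_range(), skipping vm_pgoff entries first.  Only cached
 * mappings are supported; note that vm_pgoff is counted in scatterlist
 * entries here, not in pages.
 */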
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	if (!ION_IS_CACHED(buffer->flags)) {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (offset) {
			offset--;
			continue;
		}
		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				sg_dma_len(sg), vma->vm_page_prot);
		addr += sg_dma_len(sg);
	}
	return 0;
}

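/*
 * Cache maintenance for ION_IOC_{CLEAN,INV,CLEAN_INV}_CACHES.  With a
 * kernel vaddr the dmac_*_range() helpers operate on the requested range;
 * without one the whole sg_table is synced through the DMA API.  If the
 * heap sits behind an outer cache, the matching outer_*_range() op is then
 * applied per scatterlist entry using the physical address from
 * page_to_phys().
 */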
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		if (!vaddr)
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
		else
			dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		if (!vaddr)
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		else
			dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		if (!vaddr) {
			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_TO_DEVICE);
			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
				buffer->sg_table->nents, DMA_FROM_DEVICE);
		} else {
			dmac_flush_range(vaddr, vaddr + length);
		}
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		struct sg_table *table = buffer->priv_virt;
		struct scatterlist *sg;
		int i;

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate virtual address to physical address\n");
				return -EINVAL;
			}
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

static int ion_system_print_debug(struct ion_heap *heap, struct seq_file *s,
				  const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
			(unsigned long) atomic_read(&system_heap_allocated));

	return 0;
}

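/*
 * Map the buffer into an MSM IOMMU domain/partition.  The iova range is
 * sized to iova_length and aligned at least to the first (largest)
 * scatterlist entry so the IOMMU can use larger block mappings; any
 * padding beyond buffer->size is handed to msm_iommu_map_extra() with the
 * physical address of the first entry and a SZ_4K page size.
 */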
int ion_system_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	unsigned long extra_iova_addr;
	struct sg_table *table = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu())
		return -EINVAL;

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	/* Use the biggest alignment to allow bigger IOMMU mappings.
	 * Use the first entry since the first entry will always be the
	 * biggest entry. To take advantage of bigger mapping sizes both the
	 * VA and PA addresses have to be aligned to the biggest size.
	 */
	if (table->sgl->length > align)
		align = table->sgl->length;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
			      buffer->size, prot);

	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	extra_iova_addr = data->iova_addr + buffer->size;
	if (extra) {
		unsigned long phys_addr = sg_phys(table->sgl);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
	.cache_op = ion_system_heap_cache_ops,
	.print_debug = ion_system_print_debug,
	.map_iommu = ion_system_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	system_heap_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

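/*
 * The "system contig" variant below backs each buffer with a single
 * kzalloc() allocation, so it is physically contiguous, can report a
 * physical address via the phys op, and needs no page gathering for its
 * kernel or DMA mappings.
 */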
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	atomic_add(len, &system_contig_heap_allocated);
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
	atomic_sub(buffer->size, &system_contig_heap_allocated);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	if (ION_IS_CACHED(buffer->flags))
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	else {
		pr_err("%s: cannot map system heap uncached\n", __func__);
		return -EINVAL;
	}
}

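/*
 * Cache maintenance for the contiguous heap: unlike the discontiguous heap
 * above, this path assumes a valid kernel vaddr and uses the dmac_*_range()
 * helpers directly, followed by the matching outer cache op on the buffer's
 * physical start address (plus 'offset') when an outer cache is present.
 */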
int ion_system_contig_heap_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_contig_has_outer_cache) {
		unsigned long pstart;

		pstart = virt_to_phys(buffer->priv_virt) + offset;
		if (!pstart) {
			WARN(1, "Could not do virt to phys translation on %p\n",
				buffer->priv_virt);
			return -EINVAL;
		}

		outer_cache_op(pstart, pstart + PAGE_SIZE);
	}

	return 0;
}

static int ion_system_contig_print_debug(struct ion_heap *heap,
					 struct seq_file *s,
					 const struct rb_root *unused)
{
	seq_printf(s, "total bytes currently allocated: %lx\n",
		(unsigned long) atomic_read(&system_contig_heap_allocated));

	return 0;
}

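/*
 * IOMMU mapping for the contiguous heap: without an IOMMU the physical
 * address simply doubles as the "iova".  Otherwise a one-entry scatterlist
 * is built on the fly for the kzalloc'd region and mapped into the
 * requested domain/partition, with any extra iova length filled via
 * msm_iommu_map_extra() as in the discontiguous case.
 */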
int ion_system_contig_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	int ret = 0;
	struct iommu_domain *domain;
	unsigned long extra;
	struct scatterlist *sglist = NULL;
	struct page *page = NULL;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	if (!ION_IS_CACHED(flags))
		return -EINVAL;

	if (!msm_use_iommu()) {
		data->iova_addr = virt_to_phys(buffer->vaddr);
		return 0;
	}

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}
	page = virt_to_page(buffer->vaddr);

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto out1;
	}

	sg_init_table(sglist, 1);
	sg_set_page(sglist, page, buffer->size, 0);

	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		unsigned long phys_addr = sg_phys(sglist);

		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
					  extra, SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;
out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return;
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_contig_heap_map_kernel,
	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
	.cache_op = ion_system_contig_heap_cache_ops,
	.print_debug = ion_system_contig_print_debug,
	.map_iommu = ion_system_contig_heap_map_iommu,
	.unmap_iommu = ion_system_heap_unmap_iommu,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *pheap)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	system_heap_contig_has_outer_cache = pheap->has_outer_cache;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}