/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

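/*
 * IOMMU heap instance. has_outer_cache records whether the platform has an
 * outer (L2) cache that must be maintained explicitly in the cache_op hook.
 */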
struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

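/*
 * Per-buffer bookkeeping: the array of discontiguous pages backing the
 * allocation, the page count, and the page-aligned size in bytes.
 */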
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

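/*
 * Allocate the buffer as an array of zeroed order-0 pages and describe it
 * with an sg_table so it can later be mapped into an IOMMU domain. Uncached
 * allocations are flushed out of the CPU caches before first use.
 */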
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long size, unsigned long align,
				      unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		unsigned int i;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			goto err1;
		}
		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
		if (ret)
			goto err2;

		for_each_sg(table->sgl, sg, table->nents, i) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i]) {
				ret = -ENOMEM;
				goto err3;
			}

			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = sg_phys(sg);
		}

		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;

	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

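/*
 * Release the pages and bookkeeping set up by ion_iommu_heap_allocate().
 * The sg_table itself is freed in ion_iommu_heap_unmap_dma().
 */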
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	kfree(data->pages);
	kfree(data);
}

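/*
 * Map the buffer's pages into a contiguous kernel virtual range with vmap().
 * Uncached buffers get a non-cached pgprot.
 */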
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

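/* Undo ion_iommu_heap_map_kernel() by vunmap()ing the kernel mapping. */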
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

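/*
 * Map the buffer into userspace by inserting each page into the vma.
 * Uncached buffers are mapped write-combined. A vm_insert_page() failure
 * fails the mmap, which cleans up the vma.
 */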
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

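/*
 * Map the buffer into an IOMMU domain/partition: reserve an iova range of
 * iova_length bytes, map the scatterlist at its start, and pad any extra
 * length beyond buffer->size via msm_iommu_map_extra().
 */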
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);
	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      buffer->size);
out:
	return ret;
}

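/*
 * Tear down an IOMMU mapping created by ion_iommu_heap_map_iommu(): unmap
 * the full mapped range and return the iova region to its partition.
 */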
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

	return;
}

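/*
 * Perform clean/invalidate/flush maintenance on the kernel mapping, then
 * walk the individual pages for the outer (L2) cache, since the buffer is
 * not physically contiguous.
 */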
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;

		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

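/* The sg_table is built at allocation time, so just hand it back. */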
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

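/* Free the sg_table built in ion_iommu_heap_allocate(). */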
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

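/* Heap operations exported to the ion core. */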
static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

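/* Create an IOMMU heap instance described by heap_data. */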
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

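/* Free the heap instance allocated by ion_iommu_heap_create(). */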
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}