/*
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include <linux/dma-mapping.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

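/*
 * Allocate the buffer's backing store one page at a time, build an
 * sg_table over the pages, and stash the page array in buffer->priv_virt.
 * Uncached buffers are synced for device access up front.
 */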
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		struct scatterlist *sg;
		struct sg_table *table;
		unsigned int i;

		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		table = buffer->sg_table =
				kzalloc(sizeof(struct sg_table), GFP_KERNEL);

		if (!table) {
			ret = -ENOMEM;
			/* data->pages is already allocated; free it too */
			goto err2;
		}
		ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
		if (ret)
			goto err2;

		for_each_sg(table->sgl, sg, table->nents, i) {
			data->pages[i] = alloc_page(
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
			if (!data->pages[i])
				goto err3;

			sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
			sg_dma_address(sg) = sg_phys(sg);
		}

		if (!ION_IS_CACHED(flags))
			dma_sync_sg_for_device(NULL, table->sgl, table->nents,
					       DMA_BIDIRECTIONAL);

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	sg_free_table(buffer->sg_table);
err2:
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;

	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

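/* Free the backing pages and the bookkeeping set up at allocation time. */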
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	kfree(data->pages);
	kfree(data);
}

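/*
 * Build a contiguous kernel mapping over the (physically discontiguous)
 * pages with vmap(); uncached buffers get a noncached mapping.
 */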
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(buffer->flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

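/* Drop the vmap() mapping set up by ion_iommu_heap_map_kernel(). */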
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

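/*
 * Map the buffer into userspace by inserting each backing page into the
 * vma; uncached buffers are mapped write-combined.
 */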
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(buffer->flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

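/*
 * Map the buffer into the given IOMMU domain/partition: allocate an IOVA
 * range covering iova_length, map the buffer's scatterlist into it, and
 * back any extra length beyond buffer->size with padding mappings.
 */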
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (ret)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer->sg_table->sgl,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      buffer->size);
out:
	return ret;
}

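/* Tear down an IOMMU mapping and release its IOVA range. */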
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

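/*
 * Perform the requested cache maintenance on the kernel mapping, then
 * walk the page list for the matching outer-cache operation when an
 * outer cache is present.
 */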
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;

		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

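/* The sg_table is built at allocation time; just hand it back. */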
static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
					       struct ion_buffer *buffer)
{
	return buffer->sg_table;
}

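/* Free the sg_table created in ion_iommu_heap_allocate(). */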
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
	buffer->sg_table = NULL;
}

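/* Heap operations exposed to the ION core. */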
static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

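/*
 * Allocate and initialise an IOMMU heap instance, wiring up iommu_heap_ops
 * and recording whether outer-cache maintenance is needed on this target.
 */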
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

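/* Free a heap instance created by ion_iommu_heap_create(). */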
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}