/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/msm_ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

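/*
 * Per-heap state for the ION IOMMU heap. Besides the embedded
 * struct ion_heap it only records whether the platform has an outer
 * (L2) cache, which the cache_op handler needs to know.
 */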
struct ion_iommu_heap {
        struct ion_heap heap;
        unsigned int has_outer_cache;
};

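/*
 * Per-buffer private data: the array of individually allocated pages
 * backing the buffer, the number of pages, and the page-aligned size.
 */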
struct ion_iommu_priv_data {
        struct page **pages;
        int nrpages;
        unsigned long size;
};

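/*
 * Allocate @size bytes as discrete order-0 pages and describe them with
 * an sg_table (one page per scatterlist entry), so the buffer never has
 * to be physically contiguous. Only usable when an MSM IOMMU is
 * present; otherwise the allocation is refused.
 */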
static int ion_iommu_heap_allocate(struct ion_heap *heap,
                                   struct ion_buffer *buffer,
                                   unsigned long size, unsigned long align,
                                   unsigned long flags)
{
        int ret, i;
        struct ion_iommu_priv_data *data = NULL;

        if (msm_use_iommu()) {
                struct scatterlist *sg;
                struct sg_table *table;
                unsigned int i;

                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                data->size = PFN_ALIGN(size);
                data->nrpages = data->size >> PAGE_SHIFT;
                data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
                                      GFP_KERNEL);
                if (!data->pages) {
                        ret = -ENOMEM;
                        goto err1;
                }

                table = buffer->sg_table =
                                kzalloc(sizeof(struct sg_table), GFP_KERNEL);

                if (!table) {
                        ret = -ENOMEM;
                        goto err2;
                }
                ret = sg_alloc_table(table, data->nrpages, GFP_KERNEL);
                if (ret)
                        goto err2;

                for_each_sg(table->sgl, sg, table->nents, i) {
                        data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                        if (!data->pages[i]) {
                                ret = -ENOMEM;
                                goto err3;
                        }

                        sg_set_page(sg, data->pages[i], PAGE_SIZE, 0);
                }

                buffer->priv_virt = data;
                return 0;

        } else {
                return -ENOMEM;
        }

err3:
        sg_free_table(buffer->sg_table);
err2:
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;

        for (i = 0; i < data->nrpages; i++) {
                if (data->pages[i])
                        __free_page(data->pages[i]);
        }
        kfree(data->pages);
err1:
        kfree(data);
        return ret;
}

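/*
 * Release every page backing the buffer along with the private
 * bookkeeping. The sg_table itself is torn down in unmap_dma.
 */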
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        int i;

        if (!data)
                return;

        for (i = 0; i < data->nrpages; i++)
                __free_page(data->pages[i]);

        kfree(data->pages);
        kfree(data);
}

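/*
 * Map the buffer's discontiguous pages into one contiguous kernel
 * virtual range with vmap(). Buffers not flagged as cached are mapped
 * with a non-cached pgprot.
 */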
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        pgprot_t page_prot = PAGE_KERNEL;

        if (!data)
                return NULL;

        if (!ION_IS_CACHED(buffer->flags))
                page_prot = pgprot_noncached(page_prot);

        buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

        return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!buffer->vaddr)
                return;

        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
}

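/*
 * Back a userspace mmap() by inserting the buffer's pages into the VMA
 * one at a time. Uncached buffers are mapped write-combined. Returning
 * an error from here fails the mmap, which cleans up the VMA.
 */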
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                            struct vm_area_struct *vma)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        int i;
        unsigned long curr_addr;

        if (!data)
                return -EINVAL;

        if (!ION_IS_CACHED(buffer->flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        curr_addr = vma->vm_start;
        for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
                if (vm_insert_page(vma, curr_addr, data->pages[i])) {
                        /*
                         * This will fail the mmap which will
                         * clean up the vma space properly.
                         */
                        return -EINVAL;
                }
                curr_addr += PAGE_SIZE;
        }
        return 0;
}

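/*
 * Map the buffer into an MSM IOMMU domain/partition: reserve an IOVA
 * range of @iova_length bytes, map the buffer's scatterlist into it,
 * and map any extra padding beyond buffer->size in SZ_4K chunks via
 * msm_iommu_map_extra(). The mapping is read/write, with IOMMU_CACHE
 * added for cached buffers.
 */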
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
                             struct ion_iommu_map *data,
                             unsigned int domain_num,
                             unsigned int partition_num,
                             unsigned long align,
                             unsigned long iova_length,
                             unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        int prot = IOMMU_WRITE | IOMMU_READ;

        prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

        BUG_ON(!msm_use_iommu());

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr,
                              buffer->sg_table->sgl,
                              buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
                                          prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);

out:
        return ret;
}

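/*
 * Tear down a mapping created by map_iommu: unmap the IOVA range from
 * the domain and return the address range to the partition's allocator.
 */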
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        BUG_ON(!msm_use_iommu());

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
}

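/*
 * Cache maintenance for ION_IOC_CLEAN/INV/CLEAN_INV_CACHES on the
 * kernel mapping: the inner (L1) cache is handled with the dmac_*
 * range helpers, and if the heap reports an outer cache, the matching
 * outer_*_range operation is applied page by page using the physical
 * address of each backing page.
 */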
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                dmac_flush_range(vaddr, vaddr + length);
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (iommu_heap->has_outer_cache) {
                unsigned long pstart;
                unsigned int i;
                struct ion_iommu_priv_data *data = buffer->priv_virt;

                if (!data)
                        return -ENOMEM;

                for (i = 0; i < data->nrpages; ++i) {
                        pstart = page_to_phys(data->pages[i]);
                        outer_cache_op(pstart, pstart + PAGE_SIZE);
                }
        }
        return 0;
}

static struct sg_table *ion_iommu_heap_map_dma(struct ion_heap *heap,
                                               struct ion_buffer *buffer)
{
        return buffer->sg_table;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
        buffer->sg_table = NULL;
}

static struct ion_heap_ops iommu_heap_ops = {
        .allocate = ion_iommu_heap_allocate,
        .free = ion_iommu_heap_free,
        .map_user = ion_iommu_heap_map_user,
        .map_kernel = ion_iommu_heap_map_kernel,
        .unmap_kernel = ion_iommu_heap_unmap_kernel,
        .map_iommu = ion_iommu_heap_map_iommu,
        .unmap_iommu = ion_iommu_heap_unmap_iommu,
        .cache_op = ion_iommu_cache_ops,
        .map_dma = ion_iommu_heap_map_dma,
        .unmap_dma = ion_iommu_heap_unmap_dma,
};

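/*
 * Instantiate an IOMMU heap from its platform description (typically an
 * ION_HEAP_TYPE_IOMMU entry in the board's ion_platform_data). Only the
 * ops, type and has_outer_cache flag are filled in here; the heap owns
 * no memory pool of its own.
 */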
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_iommu_heap *iommu_heap;

        iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
        if (!iommu_heap)
                return ERR_PTR(-ENOMEM);

        iommu_heap->heap.ops = &iommu_heap_ops;
        iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
        iommu_heap->has_outer_cache = heap_data->has_outer_cache;

        return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
        struct ion_iommu_heap *iommu_heap =
                container_of(heap, struct ion_iommu_heap, heap);

        kfree(iommu_heap);
        iommu_heap = NULL;
}