/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
	unsigned int has_outer_cache;
};

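/*
 * Per-buffer bookkeeping for this heap: the array of individually
 * allocated pages backing the buffer, the page-aligned size, and a
 * scatterlist describing those pages for IOMMU mapping.
 */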
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
	struct scatterlist *iommu_sglist;
};

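/*
 * Allocate the backing memory one page at a time (no physical contiguity
 * is required since the buffer is only ever accessed through the IOMMU)
 * and build one scatterlist entry per page.
 */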
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}
		data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
						data->nrpages);
		if (!data->iommu_sglist) {
			ret = -ENOMEM;
			goto err2;
		}

		sg_init_table(data->iommu_sglist, data->nrpages);

		for (i = 0; i < data->nrpages; i++) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i]) {
				ret = -ENOMEM;
				goto err3;
			}

			sg_set_page(&data->iommu_sglist[i], data->pages[i],
				    PAGE_SIZE, 0);
		}

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err3:
	vfree(data->iommu_sglist);
	data->iommu_sglist = NULL;

	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
err2:
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

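/*
 * Release every page backing the buffer along with the scatterlist and
 * per-buffer bookkeeping set up by ion_iommu_heap_allocate().
 */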
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	vfree(data->iommu_sglist);
	data->iommu_sglist = NULL;

	kfree(data->pages);
	kfree(data);
}

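/*
 * Map the discontiguous pages into a contiguous kernel virtual range with
 * vmap(). Uncached buffers get a non-cached page protection.
 */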
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

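/*
 * Map the buffer into a user address space by inserting each backing page
 * into the vma one at a time. Uncached buffers are mapped write-combined.
 */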
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma, unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * Returning an error here fails the mmap(), which
			 * will clean up the vma properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

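/*
 * Map the buffer into an IOMMU domain/partition: reserve an iova range of
 * iova_length bytes, map the buffer's scatterlist into it, and hand any
 * extra length beyond the buffer size to msm_iommu_map_extra().
 */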
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	ret = msm_allocate_iova_address(domain_num, partition_num,
					data->mapped_size, align,
					&data->iova_addr);

	if (!data->iova_addr)
		goto out;

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer_data->iommu_sglist, buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
out:
	return ret;
}

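/*
 * Tear down an IOMMU mapping created by ion_iommu_heap_map_iommu() and
 * return the iova range to the domain's allocator.
 */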
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

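/*
 * Perform the requested cache maintenance on the kernel mapping, then, if
 * the heap sits in front of an outer cache, walk the backing pages and
 * apply the matching outer-cache operation to each one.
 */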
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (iommu_heap->has_outer_cache) {
		unsigned long pstart;
		unsigned int i;
		struct ion_iommu_priv_data *data = buffer->priv_virt;

		if (!data)
			return -ENOMEM;

		for (i = 0; i < data->nrpages; ++i) {
			pstart = page_to_phys(data->pages[i]);
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}

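/*
 * The DMA "mapping" is simply the scatterlist built at allocation time, so
 * map_dma hands it back directly and unmap_dma has nothing to undo.
 */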
static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
						  struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	return data->iommu_sglist;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

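/*
 * Instantiate an IOMMU heap from platform data; the only per-heap state
 * carried over is whether an outer cache is present.
 */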
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;
	iommu_heap->has_outer_cache = heap_data->has_outer_cache;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
	     container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
}
349}