/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

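/*
 * Allocate the backing memory for an ION IOMMU buffer as an array of
 * individually allocated, zeroed 4K pages.  The page array is stored in
 * buffer->priv_virt so the map_kernel/map_user/map_iommu paths can walk
 * it later.  Returns -ENOMEM when no IOMMU is present or when any
 * allocation fails.
 */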
static int ion_iommu_heap_allocate(struct ion_heap *heap,
					struct ion_buffer *buffer,
					unsigned long size, unsigned long align,
					unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for (i = 0; i < data->nrpages; i++) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i]) {
				ret = -ENOMEM;
				goto err2;
			}
		}

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err2:
	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

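/*
 * Release every page in the buffer's page array, then the page array
 * and the private data structure themselves.
 */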
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	kfree(data->pages);
	kfree(data);
}

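/*
 * Map the buffer's pages into a contiguous kernel virtual range with
 * vmap().  Uncached buffers are mapped with non-cached page protection.
 */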
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

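/*
 * Tear down the kernel mapping created by ion_iommu_heap_map_kernel().
 */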
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

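/*
 * Map the buffer into a user address space one page at a time with
 * vm_insert_page().  Uncached buffers are mapped write-combined.  A
 * failed insertion fails the whole mmap(), which cleans up the vma.
 */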
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma, unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

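/*
 * Map the buffer into an IOMMU domain/partition: reserve an IOVA range
 * of iova_length bytes, map each backing page 4K at a time, and map any
 * extra length beyond the buffer size with msm_iommu_map_extra().  On
 * failure the partially created mapping and the IOVA range are released.
 */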
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	unsigned long temp_iova;
	struct iommu_domain *domain;
	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
	int i, j, ret = 0;
	unsigned long extra;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	for (i = buffer->size, j = 0; i > 0; j++, i -= SZ_4K,
						temp_iova += SZ_4K) {
		ret = iommu_map(domain, temp_iova,
				page_to_phys(buffer_data->pages[j]),
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %x in domain %p\n",
				__func__, temp_iova,
				page_to_phys(buffer_data->pages[j]),
				domain);
			goto out2;
		}
	}

	if (extra) {
		ret = msm_iommu_map_extra(domain, temp_iova, extra, flags);
		if (ret < 0)
			goto out2;
	}

	return 0;

out2:
	/* Unmap only the pages that were successfully mapped above. */
	for ( ; i < buffer->size; i += SZ_4K) {
		temp_iova -= SZ_4K;
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	}

out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      buffer->size);

out:

	return ret;
}

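/*
 * Undo ion_iommu_heap_map_iommu(): unmap the IOVA range 4K at a time
 * and return it to the domain's address allocator.
 */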
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

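/*
 * Perform the requested cache maintenance (clean, invalidate, or both)
 * on the buffer, walking it page by page so each kernel virtual page
 * can be paired with its physical address.  The offset and length
 * arguments are currently ignored; the whole buffer is processed.
 */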
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	unsigned long vstart, pstart;
	void (*op)(unsigned long, unsigned long, unsigned long);
	unsigned int i;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!data)
		return -ENOMEM;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = clean_caches;
		break;
	case ION_IOC_INV_CACHES:
		op = invalidate_caches;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = clean_and_invalidate_caches;
		break;
	default:
		return -EINVAL;
	}

	vstart = (unsigned long) vaddr;
	for (i = 0; i < data->nrpages; ++i, vstart += PAGE_SIZE) {
		pstart = page_to_phys(data->pages[i]);
		op(vstart, PAGE_SIZE, pstart);
	}

	return 0;
}

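/*
 * Build a scatterlist describing the buffer, one entry per backing
 * page.  The table is allocated with vmalloc() and released in
 * ion_iommu_heap_unmap_dma().
 */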
static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
					struct ion_buffer *buffer)
{
	struct scatterlist *sglist = NULL;

	if (buffer->priv_virt) {
		struct ion_iommu_priv_data *data = buffer->priv_virt;
		unsigned int i;

		if (!data->nrpages)
			return NULL;

		sglist = vmalloc(sizeof(*sglist) * data->nrpages);
		if (!sglist)
			return ERR_PTR(-ENOMEM);

		sg_init_table(sglist, data->nrpages);
		for (i = 0; i < data->nrpages; ++i)
			sg_set_page(&sglist[i], data->pages[i], PAGE_SIZE, 0);
	}
	return sglist;
}

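/*
 * Free the scatterlist created by ion_iommu_heap_map_dma().
 */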
static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
	if (buffer->sglist)
		vfree(buffer->sglist);
}

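/* Heap operations handed to the ION core when the heap is created. */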
static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

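/*
 * Allocate and initialise an IOMMU heap instance and return the
 * embedded generic ion_heap to the ION core.
 */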
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

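/*
 * Free a heap allocated by ion_iommu_heap_create().
 */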
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}