/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

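/*
 * Per-buffer bookkeeping: the individually allocated pages backing the
 * buffer, their count, the page-aligned size, and a scatterlist over the
 * same pages used for the IOMMU and DMA mapping paths.
 */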
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
	struct scatterlist *iommu_sglist;
};

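/*
 * Allocate the buffer as an array of single pages (no physical contiguity
 * is required) and build a scatterlist over them so the buffer can later
 * be mapped into an IOMMU domain. Fails with -ENOMEM when the platform has
 * no usable IOMMU.
 */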
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}
		data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
					     data->nrpages);
		if (!data->iommu_sglist) {
			ret = -ENOMEM;
			kfree(data->pages);
			goto err1;
		}

		sg_init_table(data->iommu_sglist, data->nrpages);

		for (i = 0; i < data->nrpages; i++) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i])
				goto err2;

			sg_set_page(&data->iommu_sglist[i], data->pages[i],
				    PAGE_SIZE, 0);
		}

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err2:
	vfree(data->iommu_sglist);
	data->iommu_sglist = NULL;

	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

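/* Release the backing pages, the scatterlist and the bookkeeping data. */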
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	vfree(data->iommu_sglist);
	data->iommu_sglist = NULL;

	kfree(data->pages);
	kfree(data);
}

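/*
 * Map the discontiguous pages into a contiguous kernel virtual range with
 * vmap(); uncached buffers get a noncached pgprot.
 */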
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

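/* Tear down the vmap() mapping created by ion_iommu_heap_map_kernel(). */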
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

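/*
 * Map the buffer into a userspace VMA one page at a time with
 * vm_insert_page(); uncached buffers are mapped write-combined.
 */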
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma, unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;
	unsigned long curr_addr;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	curr_addr = vma->vm_start;
	for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
		if (vm_insert_page(vma, curr_addr, data->pages[i])) {
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;
		}
		curr_addr += PAGE_SIZE;
	}
	return 0;
}

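/*
 * Map the buffer into an IOMMU domain/partition: reserve an IOVA region of
 * iova_length bytes, map the buffer's scatterlist into it, and map any extra
 * length beyond the buffer size in 4K pages. On failure the partial mapping
 * and the IOVA reservation are undone.
 */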
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	ret = iommu_map_range(domain, data->iova_addr,
			      buffer_data->iommu_sglist, buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
		       __func__, data->iova_addr, domain);
		goto out1;
	}

	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;

		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
					  prot);
		if (ret)
			goto out2;
	}
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

out:
	return ret;
}

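/* Undo ion_iommu_heap_map_iommu(): unmap the IOVA range and release it. */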
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

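/*
 * Clean and/or invalidate the CPU caches for the buffer, page by page, using
 * the kernel mapping passed in vaddr and the physical address of each
 * backing page.
 */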
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			       void *vaddr, unsigned int offset,
			       unsigned int length, unsigned int cmd)
{
	unsigned long vstart, pstart;
	void (*op)(unsigned long, unsigned long, unsigned long);
	unsigned int i;
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	if (!data)
		return -ENOMEM;

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		op = clean_caches;
		break;
	case ION_IOC_INV_CACHES:
		op = invalidate_caches;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		op = clean_and_invalidate_caches;
		break;
	default:
		return -EINVAL;
	}

	vstart = (unsigned long) vaddr;
	for (i = 0; i < data->nrpages; ++i, vstart += PAGE_SIZE) {
		pstart = page_to_phys(data->pages[i]);
		op(vstart, PAGE_SIZE, pstart);
	}

	return 0;
}

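/*
 * The scatterlist is built at allocation time, so map_dma simply hands it
 * back and unmap_dma has nothing to do.
 */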
static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
						  struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;

	return data->iommu_sglist;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
				     struct ion_buffer *buffer)
{
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
	.cache_op = ion_iommu_cache_ops,
	.map_dma = ion_iommu_heap_map_dma,
	.unmap_dma = ion_iommu_heap_unmap_dma,
};

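/*
 * Create an instance of the IOMMU heap and hook up its operations;
 * ion_iommu_heap_destroy() below releases it.
 */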
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
}