/*
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
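
/*
 * ION heap that backs each buffer with individually allocated pages.
 * The pages are tracked both in a page array and in a scatterlist so a
 * buffer can be mapped into an MSM IOMMU domain, into the kernel with
 * vmap(), or into userspace.
 */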
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
        struct ion_heap heap;
};
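
/*
 * Per-buffer private data: the individually allocated pages, their
 * count and page-aligned total size, and a scatterlist describing the
 * same pages for IOMMU and DMA mapping.
 */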
struct ion_iommu_priv_data {
        struct page **pages;
        int nrpages;
        unsigned long size;
        struct scatterlist *iommu_sglist;
};
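
/*
 * Allocate a buffer as discontiguous, zeroed pages. Each page is
 * recorded in both the page array (used by vmap() and vm_insert_page())
 * and the scatterlist (used later for IOMMU mapping). Fails when the
 * MSM IOMMU is not available.
 */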
static int ion_iommu_heap_allocate(struct ion_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long size, unsigned long align,
                                      unsigned long flags)
{
        int ret, i;
        struct ion_iommu_priv_data *data = NULL;

        if (msm_use_iommu()) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                data->size = PFN_ALIGN(size);
                data->nrpages = data->size >> PAGE_SHIFT;
                data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
                                GFP_KERNEL);
                if (!data->pages) {
                        ret = -ENOMEM;
                        goto err1;
                }
                data->iommu_sglist = vmalloc(sizeof(*data->iommu_sglist) *
                                                data->nrpages);
                if (!data->iommu_sglist) {
                        /* err2 also frees the (still empty) page array. */
                        ret = -ENOMEM;
                        goto err2;
                }

                sg_init_table(data->iommu_sglist, data->nrpages);

                for (i = 0; i < data->nrpages; i++) {
                        data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                        if (!data->pages[i]) {
                                ret = -ENOMEM;
                                goto err2;
                        }

                        sg_set_page(&data->iommu_sglist[i], data->pages[i],
                                    PAGE_SIZE, 0);
                }

                buffer->priv_virt = data;
                return 0;

        } else {
                return -ENOMEM;
        }

err2:
        vfree(data->iommu_sglist);
        data->iommu_sglist = NULL;

        for (i = 0; i < data->nrpages; i++) {
                if (data->pages[i])
                        __free_page(data->pages[i]);
        }
        kfree(data->pages);
err1:
        kfree(data);
        return ret;
}
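
/*
 * Free everything ion_iommu_heap_allocate() set up: the pages, the
 * scatterlist, the page array and the private data itself.
 */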
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        int i;

        if (!data)
                return;

        for (i = 0; i < data->nrpages; i++)
                __free_page(data->pages[i]);

        vfree(data->iommu_sglist);
        data->iommu_sglist = NULL;

        kfree(data->pages);
        kfree(data);
}
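
/*
 * Map the buffer's pages into a contiguous kernel virtual range with
 * vmap(). Uncached buffers are mapped with a noncached pgprot.
 */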
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long flags)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        pgprot_t page_prot = PAGE_KERNEL;

        if (!data)
                return NULL;

        if (!ION_IS_CACHED(flags))
                page_prot = pgprot_noncached(page_prot);

        buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

        return buffer->vaddr;
}
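
/* Tear down the mapping created by ion_iommu_heap_map_kernel(). */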
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        if (!buffer->vaddr)
                return;

        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
}
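
/*
 * Map the buffer into userspace by inserting its pages into the vma one
 * at a time. Uncached buffers are mapped write-combined.
 */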
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                            struct vm_area_struct *vma, unsigned long flags)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;
        int i;
        unsigned long curr_addr;

        if (!data)
                return -EINVAL;

        if (!ION_IS_CACHED(flags))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        curr_addr = vma->vm_start;
        for (i = 0; i < data->nrpages && curr_addr < vma->vm_end; i++) {
                if (vm_insert_page(vma, curr_addr, data->pages[i])) {
                        /*
                         * This will fail the mmap which will
                         * clean up the vma space properly.
                         */
                        return -EINVAL;
                }
                curr_addr += PAGE_SIZE;
        }
        return 0;
}
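
/*
 * Map the buffer into an IOMMU domain and partition: reserve an IOVA
 * range, map the buffer's scatterlist into it, then map any extra
 * padding requested beyond the buffer size.
 */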
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
                                        struct ion_iommu_map *data,
                                        unsigned int domain_num,
                                        unsigned int partition_num,
                                        unsigned long align,
                                        unsigned long iova_length,
                                        unsigned long flags)
{
        struct iommu_domain *domain;
        int ret = 0;
        unsigned long extra;
        int prot = ION_IS_CACHED(flags) ? 1 : 0;
        struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;

        BUG_ON(!msm_use_iommu());

        data->mapped_size = iova_length;
        extra = iova_length - buffer->size;

        data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
                                                data->mapped_size, align);

        if (!data->iova_addr) {
                ret = -ENOMEM;
                goto out;
        }

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -ENOMEM;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr,
                              buffer_data->iommu_sglist, buffer->size, prot);
        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        if (extra) {
                unsigned long extra_iova_addr = data->iova_addr + buffer->size;
                ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                                buffer->size);
out:
        return ret;
}
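
/*
 * Undo ion_iommu_heap_map_iommu(): unmap the IOVA range and return it
 * to the domain's address allocator.
 */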
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        BUG_ON(!msm_use_iommu());

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                                data->mapped_size);
}
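
/*
 * Perform cache maintenance (clean, invalidate, or both) on the buffer,
 * walking it page by page and pairing each kernel virtual page starting
 * at vaddr with the physical page that backs it.
 */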
static int ion_iommu_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
                        void *vaddr, unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        unsigned long vstart, pstart;
        void (*op)(unsigned long, unsigned long, unsigned long);
        unsigned int i;
        struct ion_iommu_priv_data *data = buffer->priv_virt;

        if (!data)
                return -ENOMEM;

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                op = clean_caches;
                break;
        case ION_IOC_INV_CACHES:
                op = invalidate_caches;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                op = clean_and_invalidate_caches;
                break;
        default:
                return -EINVAL;
        }

        vstart = (unsigned long) vaddr;
        for (i = 0; i < data->nrpages; ++i, vstart += PAGE_SIZE) {
                pstart = page_to_phys(data->pages[i]);
                op(vstart, PAGE_SIZE, pstart);
        }

        return 0;
}
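
/*
 * The scatterlist is built at allocation time, so map_dma simply hands
 * it back and unmap_dma has nothing to release.
 */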
static struct scatterlist *ion_iommu_heap_map_dma(struct ion_heap *heap,
                                        struct ion_buffer *buffer)
{
        struct ion_iommu_priv_data *data = buffer->priv_virt;

        return data->iommu_sglist;
}

static void ion_iommu_heap_unmap_dma(struct ion_heap *heap,
                                     struct ion_buffer *buffer)
{
}
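
/* Heap operations exported to the ION core. */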
static struct ion_heap_ops iommu_heap_ops = {
        .allocate = ion_iommu_heap_allocate,
        .free = ion_iommu_heap_free,
        .map_user = ion_iommu_heap_map_user,
        .map_kernel = ion_iommu_heap_map_kernel,
        .unmap_kernel = ion_iommu_heap_unmap_kernel,
        .map_iommu = ion_iommu_heap_map_iommu,
        .unmap_iommu = ion_iommu_heap_unmap_iommu,
        .cache_op = ion_iommu_cache_ops,
        .map_dma = ion_iommu_heap_map_dma,
        .unmap_dma = ion_iommu_heap_unmap_dma,
};
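
/*
 * Create an instance of this heap. No per-heap state is needed beyond
 * the generic struct ion_heap, so only the ops and type are filled in.
 */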
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_iommu_heap *iommu_heap;

        iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
        if (!iommu_heap)
                return ERR_PTR(-ENOMEM);

        iommu_heap->heap.ops = &iommu_heap_ops;
        iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

        return &iommu_heap->heap;
}
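
/* Free the heap instance allocated by ion_iommu_heap_create(). */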
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
        struct ion_iommu_heap *iommu_heap =
             container_of(heap, struct ion_iommu_heap, heap);

        kfree(iommu_heap);
        iommu_heap = NULL;
}