/*
 * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>
#include <linux/pfn.h>
#include "ion_priv.h"

#include <asm/mach/map.h>
#include <asm/page.h>
#include <mach/iommu_domains.h>

struct ion_iommu_heap {
	struct ion_heap heap;
};

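/*
 * Per-buffer bookkeeping: the array of individually allocated pages
 * backing the buffer, the number of pages, and the page-aligned size.
 */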
struct ion_iommu_priv_data {
	struct page **pages;
	int nrpages;
	unsigned long size;
};

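/*
 * Allocate the buffer as discrete, zeroed order-0 pages and stash the
 * page array in buffer->priv_virt. Fails with -ENOMEM when the IOMMU is
 * not in use or when any allocation fails.
 */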
static int ion_iommu_heap_allocate(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long size, unsigned long align,
				   unsigned long flags)
{
	int ret, i;
	struct ion_iommu_priv_data *data = NULL;

	if (msm_use_iommu()) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		data->size = PFN_ALIGN(size);
		data->nrpages = data->size >> PAGE_SHIFT;
		data->pages = kzalloc(sizeof(struct page *) * data->nrpages,
				      GFP_KERNEL);
		if (!data->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for (i = 0; i < data->nrpages; i++) {
			data->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!data->pages[i]) {
				ret = -ENOMEM;
				goto err2;
			}
		}

		buffer->priv_virt = data;
		return 0;

	} else {
		return -ENOMEM;
	}

err2:
	/* The page array was zero-initialised, so free only what was allocated. */
	for (i = 0; i < data->nrpages; i++) {
		if (data->pages[i])
			__free_page(data->pages[i]);
	}
	kfree(data->pages);
err1:
	kfree(data);
	return ret;
}

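/*
 * Release every page backing the buffer along with the bookkeeping
 * allocated in ion_iommu_heap_allocate().
 */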
static void ion_iommu_heap_free(struct ion_buffer *buffer)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return;

	for (i = 0; i < data->nrpages; i++)
		__free_page(data->pages[i]);

	kfree(data->pages);
	kfree(data);
}

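/*
 * Map the buffer's pages into a contiguous kernel virtual range with
 * vmap(). Uncached buffers get a non-cached page protection.
 */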
void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer,
				unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	pgprot_t page_prot = PAGE_KERNEL;

	if (!data)
		return NULL;

	if (!ION_IS_CACHED(flags))
		page_prot = pgprot_noncached(page_prot);

	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);

	return buffer->vaddr;
}

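/* Tear down the kernel mapping created by ion_iommu_heap_map_kernel(). */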
void ion_iommu_heap_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	if (!buffer->vaddr)
		return;

	vunmap(buffer->vaddr);
	buffer->vaddr = NULL;
}

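/*
 * Map the buffer into userspace by inserting each backing page into the
 * vma. Uncached buffers are mapped write-combined.
 */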
int ion_iommu_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			    struct vm_area_struct *vma, unsigned long flags)
{
	struct ion_iommu_priv_data *data = buffer->priv_virt;
	int i;

	if (!data)
		return -EINVAL;

	if (!ION_IS_CACHED(flags))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	for (i = 0; i < data->nrpages; i++)
		if (vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				   data->pages[i]))
			/*
			 * This will fail the mmap which will
			 * clean up the vma space properly.
			 */
			return -EINVAL;

	return 0;
}

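/*
 * Map the buffer into the requested IOMMU domain and partition. An iova
 * range of iova_length bytes is reserved with msm_allocate_iova_address(),
 * the buffer's pages are mapped into it 4K at a time, and any extra length
 * beyond buffer->size is backed via msm_iommu_map_extra(). On failure,
 * everything mapped so far is unwound and the iova range is released.
 */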
int ion_iommu_heap_map_iommu(struct ion_buffer *buffer,
			     struct ion_iommu_map *data,
			     unsigned int domain_num,
			     unsigned int partition_num,
			     unsigned long align,
			     unsigned long iova_length,
			     unsigned long flags)
{
	unsigned long temp_iova;
	struct iommu_domain *domain;
	struct ion_iommu_priv_data *buffer_data = buffer->priv_virt;
	int i, j, ret = 0;
	unsigned long extra;

	BUG_ON(!msm_use_iommu());

	data->mapped_size = iova_length;
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						    data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	temp_iova = data->iova_addr;
	for (i = buffer->size, j = 0; i > 0; j++, i -= SZ_4K,
					      temp_iova += SZ_4K) {
		ret = iommu_map(domain, temp_iova,
				page_to_phys(buffer_data->pages[j]),
				get_order(SZ_4K),
				ION_IS_CACHED(flags) ? 1 : 0);

		if (ret) {
			pr_err("%s: could not map %lx to %x in domain %p\n",
				__func__, temp_iova,
				page_to_phys(buffer_data->pages[j]),
				domain);
			goto out2;
		}
	}

	/* Back the padding past the end of the buffer, if any was requested. */
	if (extra &&
	    msm_iommu_map_extra(domain, temp_iova, extra, flags) < 0) {
		ret = -ENOMEM;
		goto out2;
	}

	return 0;

out2:
	/* Unwind only the pages that were successfully mapped. */
	while (j-- > 0) {
		temp_iova -= SZ_4K;
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));
	}

out1:
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);

out:
	return ret;
}

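/*
 * Undo ion_iommu_heap_map_iommu(): unmap the full mapped_size 4K at a
 * time and release the iova range back to the domain's partition.
 */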
void ion_iommu_heap_unmap_iommu(struct ion_iommu_map *data)
{
	int i;
	unsigned long temp_iova;
	unsigned int domain_num;
	unsigned int partition_num;
	struct iommu_domain *domain;

	BUG_ON(!msm_use_iommu());

	domain_num = iommu_map_domain(data);
	partition_num = iommu_map_partition(data);

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
		return;
	}

	temp_iova = data->iova_addr;
	for (i = data->mapped_size; i > 0; i -= SZ_4K, temp_iova += SZ_4K)
		iommu_unmap(domain, temp_iova, get_order(SZ_4K));

	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
			      data->mapped_size);
}

static struct ion_heap_ops iommu_heap_ops = {
	.allocate = ion_iommu_heap_allocate,
	.free = ion_iommu_heap_free,
	.map_user = ion_iommu_heap_map_user,
	.map_kernel = ion_iommu_heap_map_kernel,
	.unmap_kernel = ion_iommu_heap_unmap_kernel,
	.map_iommu = ion_iommu_heap_map_iommu,
	.unmap_iommu = ion_iommu_heap_unmap_iommu,
};

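/*
 * Instantiate an IOMMU heap for the given platform heap description and
 * hook up its operations. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */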
struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_iommu_heap *iommu_heap;

	iommu_heap = kzalloc(sizeof(struct ion_iommu_heap), GFP_KERNEL);
	if (!iommu_heap)
		return ERR_PTR(-ENOMEM);

	iommu_heap->heap.ops = &iommu_heap_ops;
	iommu_heap->heap.type = ION_HEAP_TYPE_IOMMU;

	return &iommu_heap->heap;
}

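/* Free the heap allocated by ion_iommu_heap_create(). */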
void ion_iommu_heap_destroy(struct ion_heap *heap)
{
	struct ion_iommu_heap *iommu_heap =
		container_of(heap, struct ion_iommu_heap, heap);

	kfree(iommu_heap);
	iommu_heap = NULL;
}