blob: 271e252b2c43adf8f65baa92bcc535121928988f [file] [log] [blame]
Olav Hauganab77b1b2012-02-28 09:19:22 -08001/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
Laura Abbottd01221b2012-05-16 17:52:49 -070013#include <linux/init.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070014#include <linux/iommu.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070015#include <linux/memory_alloc.h>
Laura Abbott0577d7b2012-04-17 11:14:30 -070016#include <linux/platform_device.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070017#include <linux/rbtree.h>
18#include <linux/slab.h>
Olav Haugan16cdb412012-03-27 13:02:17 -070019#include <linux/vmalloc.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070020#include <asm/sizes.h>
21#include <asm/page.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <mach/iommu.h>
23#include <mach/iommu_domains.h>
Laura Abbott9f4a8e62011-08-29 19:08:07 -070024#include <mach/socinfo.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070025#include <mach/msm_subsystem_map.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070026
/*
 * Dummy backing storage for "overmapping" unused iova space.  The physical
 * address of this array is aligned up to the mapping page size in
 * msm_iommu_map_extra(), so it is sized at twice 64K (minus 4 bytes) to
 * guarantee a fully-aligned 64K region lies inside it regardless of where
 * the linker places it.
 */
char iommu_dummy[2*SZ_64K-4];
Laura Abbotte956cce2011-10-25 13:33:20 -070029
/*
 * Per-domain bookkeeping.  One instance exists for every registered iommu
 * domain; instances live in the global domain_root rb-tree keyed by
 * domain_num.
 */
struct msm_iova_data {
	struct rb_node node;		/* linkage into domain_root */
	struct mem_pool *pools;		/* iova partition array, npools entries */
	int npools;			/* number of entries in pools[] */
	struct iommu_domain *domain;	/* underlying domain (may be NULL if
					 * iommu_domain_alloc() failed) */
	int domain_num;			/* unique id from domain_nums counter */
};
37
/* Root of the rb-tree of registered domains, keyed by domain_num. */
static struct rb_root domain_root;
/* Serializes all lookups into and insertions/colorings of domain_root. */
DEFINE_MUTEX(domain_mutex);
/* Unique domain-number source; the first atomic_inc_return() yields 0. */
static atomic_t domain_nums = ATOMIC_INIT(-1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070041
Laura Abbotte956cce2011-10-25 13:33:20 -070042int msm_iommu_map_extra(struct iommu_domain *domain,
43 unsigned long start_iova,
44 unsigned long size,
Olav Haugan8726caf2012-05-10 15:11:35 -070045 unsigned long page_size,
Laura Abbotte956cce2011-10-25 13:33:20 -070046 int cached)
47{
Olav Haugan8726caf2012-05-10 15:11:35 -070048 int i, ret_value = 0;
49 unsigned long order = get_order(page_size);
50 unsigned long aligned_size = ALIGN(size, page_size);
51 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
52 unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
53 unsigned long temp_iova = start_iova;
Laura Abbotte956cce2011-10-25 13:33:20 -070054
Olav Haugan8726caf2012-05-10 15:11:35 -070055 for (i = 0; i < nrpages; i++) {
56 int ret = iommu_map(domain, temp_iova, phy_addr, order, cached);
57 if (ret) {
58 pr_err("%s: could not map %lx in domain %p, error: %d\n",
59 __func__, start_iova, domain, ret);
60 ret_value = -EAGAIN;
61 goto out;
62 }
63 temp_iova += page_size;
Laura Abbotte956cce2011-10-25 13:33:20 -070064 }
Olav Haugan8726caf2012-05-10 15:11:35 -070065 return ret_value;
66out:
67 for (; i > 0; --i) {
68 temp_iova -= page_size;
69 iommu_unmap(domain, start_iova, order);
Olav Haugan16cdb412012-03-27 13:02:17 -070070 }
Olav Haugan8726caf2012-05-10 15:11:35 -070071 return ret_value;
72}
Laura Abbotte956cce2011-10-25 13:33:20 -070073
Olav Haugan8726caf2012-05-10 15:11:35 -070074void msm_iommu_unmap_extra(struct iommu_domain *domain,
75 unsigned long start_iova,
76 unsigned long size,
77 unsigned long page_size)
78{
79 int i;
80 unsigned long order = get_order(page_size);
81 unsigned long aligned_size = ALIGN(size, page_size);
82 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
83 unsigned long temp_iova = start_iova;
84
85 for (i = 0; i < nrpages; ++i) {
86 iommu_unmap(domain, temp_iova, order);
87 temp_iova += page_size;
88 }
Laura Abbotte956cce2011-10-25 13:33:20 -070089}
90
Laura Abbottd027fdb2012-04-17 16:22:24 -070091static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
92 unsigned long iova,
93 unsigned long phys,
94 unsigned long size,
95 int cached)
96{
97 int ret;
98 struct scatterlist *sglist;
99
100 sglist = vmalloc(sizeof(*sglist));
101 if (!sglist) {
102 ret = -ENOMEM;
103 goto err1;
104 }
105
106 sg_init_table(sglist, 1);
107 sglist->length = size;
108 sglist->offset = 0;
109 sglist->dma_address = phys;
110
111 ret = iommu_map_range(domain, iova, sglist, size, cached);
112 if (ret) {
113 pr_err("%s: could not map extra %lx in domain %p\n",
114 __func__, iova, domain);
115 }
116
117 vfree(sglist);
118err1:
119 return ret;
120
121}
122
123int msm_iommu_map_contig_buffer(unsigned long phys,
124 unsigned int domain_no,
125 unsigned int partition_no,
126 unsigned long size,
127 unsigned long align,
128 unsigned long cached,
129 unsigned long *iova_val)
130{
131 unsigned long iova;
132 int ret;
133
134 if (size & (align - 1))
135 return -EINVAL;
136
Laura Abbottd01221b2012-05-16 17:52:49 -0700137 ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
138 &iova);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700139
Laura Abbottd01221b2012-05-16 17:52:49 -0700140 if (ret)
Laura Abbottd027fdb2012-04-17 16:22:24 -0700141 return -ENOMEM;
142
143 ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
144 phys, size, cached);
145
146 if (ret)
147 msm_free_iova_address(iova, domain_no, partition_no, size);
148 else
149 *iova_val = iova;
150
151 return ret;
152}
153
/*
 * Undo msm_iommu_map_contig_buffer(): unmap the range and release the iova
 * back to its partition pool.
 */
void msm_iommu_unmap_contig_buffer(unsigned long iova,
					unsigned int domain_no,
					unsigned int partition_no,
					unsigned long size)
{
	struct iommu_domain *domain = msm_get_iommu_domain(domain_no);

	/* Fix: guard against a NULL domain instead of passing it down. */
	if (domain)
		iommu_unmap_range(domain, iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
Laura Abbotte956cce2011-10-25 13:33:20 -0700162
Laura Abbottd01221b2012-05-16 17:52:49 -0700163static struct msm_iova_data *find_domain(int domain_num)
164{
165 struct rb_root *root = &domain_root;
166 struct rb_node *p = root->rb_node;
167
168 mutex_lock(&domain_mutex);
169
170 while (p) {
171 struct msm_iova_data *node;
172
173 node = rb_entry(p, struct msm_iova_data, node);
174 if (domain_num < node->domain_num)
175 p = p->rb_left;
176 else if (domain_num > domain_num)
177 p = p->rb_right;
178 else {
179 mutex_unlock(&domain_mutex);
180 return node;
181 }
182 }
183 mutex_unlock(&domain_mutex);
184 return NULL;
185}
186
187static int add_domain(struct msm_iova_data *node)
188{
189 struct rb_root *root = &domain_root;
190 struct rb_node **p = &root->rb_node;
191 struct rb_node *parent = NULL;
192
193 mutex_lock(&domain_mutex);
194 while (*p) {
195 struct msm_iova_data *tmp;
196 parent = *p;
197
198 tmp = rb_entry(parent, struct msm_iova_data, node);
199
200 if (node->domain_num < tmp->domain_num)
201 p = &(*p)->rb_left;
202 else if (node->domain_num > tmp->domain_num)
203 p = &(*p)->rb_right;
204 else
205 BUG();
206 }
207 rb_link_node(&node->node, parent, p);
208 rb_insert_color(&node->node, root);
209 mutex_unlock(&domain_mutex);
210 return 0;
211}
212
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700213struct iommu_domain *msm_get_iommu_domain(int domain_num)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214{
Laura Abbottd01221b2012-05-16 17:52:49 -0700215 struct msm_iova_data *data;
216
217 data = find_domain(domain_num);
218
219 if (data)
220 return data->domain;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700221 else
222 return NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700223}
224
Laura Abbottd01221b2012-05-16 17:52:49 -0700225int msm_allocate_iova_address(unsigned int iommu_domain,
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700226 unsigned int partition_no,
227 unsigned long size,
Laura Abbottd01221b2012-05-16 17:52:49 -0700228 unsigned long align,
229 unsigned long *iova)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700230{
Laura Abbottd01221b2012-05-16 17:52:49 -0700231 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700232 struct mem_pool *pool;
Laura Abbottd01221b2012-05-16 17:52:49 -0700233 unsigned long va;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700234
Laura Abbottd01221b2012-05-16 17:52:49 -0700235 data = find_domain(iommu_domain);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700236
Laura Abbottd01221b2012-05-16 17:52:49 -0700237 if (!data)
238 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700239
Laura Abbottd01221b2012-05-16 17:52:49 -0700240 if (partition_no >= data->npools)
241 return -EINVAL;
242
243 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700244
245 if (!pool->gpool)
Laura Abbottd01221b2012-05-16 17:52:49 -0700246 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700247
Laura Abbottd01221b2012-05-16 17:52:49 -0700248 va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
249 if (va) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700250 pool->free -= size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700251 /* Offset because genpool can't handle 0 addresses */
252 if (pool->paddr == 0)
253 va -= SZ_4K;
254 *iova = va;
255 return 0;
256 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700257
Laura Abbottd01221b2012-05-16 17:52:49 -0700258 return -ENOMEM;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700259}
260
261void msm_free_iova_address(unsigned long iova,
Laura Abbottd01221b2012-05-16 17:52:49 -0700262 unsigned int iommu_domain,
263 unsigned int partition_no,
264 unsigned long size)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700265{
Laura Abbottd01221b2012-05-16 17:52:49 -0700266 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700267 struct mem_pool *pool;
268
Laura Abbottd01221b2012-05-16 17:52:49 -0700269 data = find_domain(iommu_domain);
270
271 if (!data) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700272 WARN(1, "Invalid domain %d\n", iommu_domain);
273 return;
274 }
275
Laura Abbottd01221b2012-05-16 17:52:49 -0700276 if (partition_no >= data->npools) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700277 WARN(1, "Invalid partition %d for domain %d\n",
278 partition_no, iommu_domain);
279 return;
280 }
281
Laura Abbottd01221b2012-05-16 17:52:49 -0700282 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700283
284 if (!pool)
285 return;
286
287 pool->free += size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700288
289 /* Offset because genpool can't handle 0 addresses */
290 if (pool->paddr == 0)
291 iova += SZ_4K;
292
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700293 gen_pool_free(pool->gpool, iova, size);
294}
295
Laura Abbottd01221b2012-05-16 17:52:49 -0700296int msm_register_domain(struct msm_iova_layout *layout)
297{
298 int i;
299 struct msm_iova_data *data;
300 struct mem_pool *pools;
301
302 if (!layout)
303 return -EINVAL;
304
305 data = kmalloc(sizeof(*data), GFP_KERNEL);
306
307 if (!data)
308 return -ENOMEM;
309
310 pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
311 GFP_KERNEL);
312
313 if (!pools)
314 goto out;
315
316 for (i = 0; i < layout->npartitions; i++) {
317 if (layout->partitions[i].size == 0)
318 continue;
319
320 pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
321
322 if (!pools[i].gpool)
323 continue;
324
325 pools[i].paddr = layout->partitions[i].start;
326 pools[i].size = layout->partitions[i].size;
327
328 /*
329 * genalloc can't handle a pool starting at address 0.
330 * For now, solve this problem by offsetting the value
331 * put in by 4k.
332 * gen pool address = actual address + 4k
333 */
334 if (pools[i].paddr == 0)
335 layout->partitions[i].start += SZ_4K;
336
337 if (gen_pool_add(pools[i].gpool,
338 layout->partitions[i].start,
339 layout->partitions[i].size, -1)) {
340 gen_pool_destroy(pools[i].gpool);
341 pools[i].gpool = NULL;
342 continue;
343 }
344 }
345
346 data->pools = pools;
347 data->npools = layout->npartitions;
348 data->domain_num = atomic_inc_return(&domain_nums);
349 data->domain = iommu_domain_alloc(layout->domain_flags);
350
351 add_domain(data);
352
353 return data->domain_num;
354
355out:
356 kfree(data);
357
358 return -EINVAL;
359}
360
/*
 * Whether the iommu should be used at all: true only when iommu hardware
 * was actually detected.
 */
int msm_use_iommu()
{
	return iommu_found();
}
368
Laura Abbott0577d7b2012-04-17 11:14:30 -0700369static int __init iommu_domain_probe(struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700370{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700371 struct iommu_domains_pdata *p = pdev->dev.platform_data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700372 int i, j;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373
Laura Abbott0577d7b2012-04-17 11:14:30 -0700374 if (!p)
375 return -ENODEV;
376
Laura Abbottd01221b2012-05-16 17:52:49 -0700377 for (i = 0; i < p->ndomains; i++) {
378 struct msm_iova_layout l;
379 struct msm_iova_partition *part;
380 struct msm_iommu_domain *domains;
Laura Abbott0577d7b2012-04-17 11:14:30 -0700381
Laura Abbottd01221b2012-05-16 17:52:49 -0700382 domains = p->domains;
383 l.npartitions = domains[i].npools;
384 part = kmalloc(
385 sizeof(struct msm_iova_partition) * l.npartitions,
386 GFP_KERNEL);
387
388 if (!part) {
389 pr_info("%s: could not allocate space for domain %d",
390 __func__, i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700391 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700392 }
Laura Abbottd01221b2012-05-16 17:52:49 -0700393
394 for (j = 0; j < l.npartitions; j++) {
395 part[j].start = p->domains[i].iova_pools[j].paddr;
396 part[j].size = p->domains[i].iova_pools[j].size;
397 }
398
399 l.partitions = part;
400
401 msm_register_domain(&l);
402
403 kfree(part);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700404 }
405
Laura Abbott0577d7b2012-04-17 11:14:30 -0700406 for (i = 0; i < p->nnames; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700407 struct device *ctx = msm_iommu_get_ctx(
Laura Abbott0577d7b2012-04-17 11:14:30 -0700408 p->domain_names[i].name);
Laura Abbottd01221b2012-05-16 17:52:49 -0700409 struct iommu_domain *domain;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700410
411 if (!ctx)
412 continue;
413
Laura Abbottd01221b2012-05-16 17:52:49 -0700414 domain = msm_get_iommu_domain(p->domain_names[i].domain);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700415
Laura Abbottd01221b2012-05-16 17:52:49 -0700416 if (!domain)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700417 continue;
418
Laura Abbottd01221b2012-05-16 17:52:49 -0700419 if (iommu_attach_device(domain, ctx)) {
420 WARN(1, "%s: could not attach domain %p to context %s."
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700421 " iommu programming will not occur.\n",
Laura Abbottd01221b2012-05-16 17:52:49 -0700422 __func__, domain,
Laura Abbott0577d7b2012-04-17 11:14:30 -0700423 p->domain_names[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700424 continue;
425 }
426 }
427
428 return 0;
429}
Laura Abbott0577d7b2012-04-17 11:14:30 -0700430
/*
 * Bound once via platform_driver_probe() in msm_subsystem_iommu_init();
 * no remove/suspend hooks are provided, so registered domains persist for
 * the life of the system.
 */
static struct platform_driver iommu_domain_driver = {
	.driver		= {
		.name	= "iommu_domains",
		.owner = THIS_MODULE
	},
};
437
/*
 * Late-boot entry point: bind iommu_domain_probe() to the
 * "iommu_domains" platform device registered by board code.
 */
static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
/* device_initcall: runs after arch/subsys init, before late_initcall. */
device_initcall(msm_subsystem_iommu_init);