blob: e92b5c5214f0dd8898141aacc205d1d85b803123 [file] [log] [blame]
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/module.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070014#include <linux/init.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/iommu.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070016#include <linux/memory_alloc.h>
Laura Abbott0577d7b2012-04-17 11:14:30 -070017#include <linux/platform_device.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070018#include <linux/vmalloc.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070019#include <linux/rbtree.h>
20#include <linux/slab.h>
Olav Haugan16cdb412012-03-27 13:02:17 -070021#include <linux/vmalloc.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <asm/sizes.h>
23#include <asm/page.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <mach/iommu.h>
25#include <mach/iommu_domains.h>
Laura Abbott9f4a8e62011-08-29 19:08:07 -070026#include <mach/socinfo.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070027#include <mach/msm_subsystem_map.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
/*
 * Dummy backing storage used to satisfy "extra" overmapping requests:
 * every extra iova page is pointed at this one buffer.  Sized at
 * 2*64K-4 so that a 64K-aligned physical address always fits inside
 * the array regardless of where the linker places it.
 */
/* dummy 64K for overmapping */
char iommu_dummy[2*SZ_64K-4];

/*
 * Per-domain bookkeeping, keyed by domain_num in a global rb-tree.
 */
struct msm_iova_data {
	struct rb_node node;		/* linkage in domain_root */
	struct mem_pool *pools;		/* iova partitions (npools entries) */
	int npools;
	struct iommu_domain *domain;	/* underlying IOMMU domain handle */
	int domain_num;			/* rb-tree key, assigned on register */
};

/* Root of the domain rb-tree; protected by domain_mutex. */
static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
/* Starts at -1 so the first atomic_inc_return() yields domain 0. */
static atomic_t domain_nums = ATOMIC_INIT(-1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070043
Laura Abbotte956cce2011-10-25 13:33:20 -070044int msm_iommu_map_extra(struct iommu_domain *domain,
45 unsigned long start_iova,
46 unsigned long size,
Olav Haugan8726caf2012-05-10 15:11:35 -070047 unsigned long page_size,
Laura Abbotte956cce2011-10-25 13:33:20 -070048 int cached)
49{
Olav Haugan5e7befd2012-06-19 14:59:37 -070050 int ret = 0;
51 int i = 0;
Olav Haugan8726caf2012-05-10 15:11:35 -070052 unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
53 unsigned long temp_iova = start_iova;
Olav Haugan5e7befd2012-06-19 14:59:37 -070054 if (page_size == SZ_4K) {
55 struct scatterlist *sglist;
56 unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
57 struct page *dummy_page = phys_to_page(phy_addr);
Laura Abbotte956cce2011-10-25 13:33:20 -070058
Olav Haugan5e7befd2012-06-19 14:59:37 -070059 sglist = vmalloc(sizeof(*sglist) * nrpages);
60 if (!sglist) {
61 ret = -ENOMEM;
Olav Haugan8726caf2012-05-10 15:11:35 -070062 goto out;
63 }
Olav Haugan5e7befd2012-06-19 14:59:37 -070064
65 sg_init_table(sglist, nrpages);
66
67 for (i = 0; i < nrpages; i++)
68 sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
69
70 ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
71 if (ret) {
72 pr_err("%s: could not map extra %lx in domain %p\n",
73 __func__, start_iova, domain);
74 }
75
76 vfree(sglist);
77 } else {
78 unsigned long order = get_order(page_size);
79 unsigned long aligned_size = ALIGN(size, page_size);
80 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
81
82 for (i = 0; i < nrpages; i++) {
83 ret = iommu_map(domain, temp_iova, phy_addr, page_size,
84 cached);
85 if (ret) {
86 pr_err("%s: could not map %lx in domain %p, error: %d\n",
87 __func__, start_iova, domain, ret);
88 ret = -EAGAIN;
89 goto out;
90 }
91 temp_iova += page_size;
92 }
Laura Abbotte956cce2011-10-25 13:33:20 -070093 }
Olav Haugan5e7befd2012-06-19 14:59:37 -070094 return ret;
Olav Haugan8726caf2012-05-10 15:11:35 -070095out:
96 for (; i > 0; --i) {
97 temp_iova -= page_size;
Steve Mucklef132c6c2012-06-06 18:30:57 -070098 iommu_unmap(domain, start_iova, page_size);
Olav Haugan16cdb412012-03-27 13:02:17 -070099 }
Olav Haugan5e7befd2012-06-19 14:59:37 -0700100 return ret;
Olav Haugan8726caf2012-05-10 15:11:35 -0700101}
Laura Abbotte956cce2011-10-25 13:33:20 -0700102
Olav Haugan8726caf2012-05-10 15:11:35 -0700103void msm_iommu_unmap_extra(struct iommu_domain *domain,
104 unsigned long start_iova,
105 unsigned long size,
106 unsigned long page_size)
107{
108 int i;
109 unsigned long order = get_order(page_size);
110 unsigned long aligned_size = ALIGN(size, page_size);
111 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
112 unsigned long temp_iova = start_iova;
113
114 for (i = 0; i < nrpages; ++i) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700115 iommu_unmap(domain, temp_iova, page_size);
Olav Haugan8726caf2012-05-10 15:11:35 -0700116 temp_iova += page_size;
117 }
Laura Abbotte956cce2011-10-25 13:33:20 -0700118}
119
Laura Abbottd027fdb2012-04-17 16:22:24 -0700120static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
121 unsigned long iova,
122 unsigned long phys,
123 unsigned long size,
124 int cached)
125{
126 int ret;
127 struct scatterlist *sglist;
Laura Abbotte543cfc2012-06-07 17:51:53 -0700128 int prot = IOMMU_WRITE | IOMMU_READ;
129 prot |= cached ? IOMMU_CACHE : 0;
Laura Abbottd027fdb2012-04-17 16:22:24 -0700130
131 sglist = vmalloc(sizeof(*sglist));
132 if (!sglist) {
133 ret = -ENOMEM;
134 goto err1;
135 }
136
137 sg_init_table(sglist, 1);
138 sglist->length = size;
139 sglist->offset = 0;
140 sglist->dma_address = phys;
141
Laura Abbotte543cfc2012-06-07 17:51:53 -0700142 ret = iommu_map_range(domain, iova, sglist, size, prot);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700143 if (ret) {
144 pr_err("%s: could not map extra %lx in domain %p\n",
145 __func__, iova, domain);
146 }
147
148 vfree(sglist);
149err1:
150 return ret;
151
152}
153
154int msm_iommu_map_contig_buffer(unsigned long phys,
155 unsigned int domain_no,
156 unsigned int partition_no,
157 unsigned long size,
158 unsigned long align,
159 unsigned long cached,
160 unsigned long *iova_val)
161{
162 unsigned long iova;
163 int ret;
164
165 if (size & (align - 1))
166 return -EINVAL;
167
Laura Abbottd01221b2012-05-16 17:52:49 -0700168 ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
169 &iova);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700170
Laura Abbottd01221b2012-05-16 17:52:49 -0700171 if (ret)
Laura Abbottd027fdb2012-04-17 16:22:24 -0700172 return -ENOMEM;
173
174 ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
175 phys, size, cached);
176
177 if (ret)
178 msm_free_iova_address(iova, domain_no, partition_no, size);
179 else
180 *iova_val = iova;
181
182 return ret;
183}
184
/*
 * Undo msm_iommu_map_contig_buffer(): unmap @size bytes at @iova and
 * return the iova range to partition @partition_no of domain @domain_no.
 */
void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	struct iommu_domain *domain = msm_get_iommu_domain(domain_no);

	iommu_unmap_range(domain, iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
Laura Abbotte956cce2011-10-25 13:33:20 -0700193
/*
 * Look up the msm_iova_data node registered under @domain_num.
 *
 * Returns the matching node, or NULL if no such domain exists.
 *
 * NOTE(review): domain_mutex protects only the tree walk; it is dropped
 * before the node is returned.  The returned pointer stays valid only
 * because nothing in this file ever removes a node — confirm if domain
 * unregistration is ever added.
 */
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	/* Standard rb-tree binary search keyed on domain_num. */
	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}
217
218static int add_domain(struct msm_iova_data *node)
219{
220 struct rb_root *root = &domain_root;
221 struct rb_node **p = &root->rb_node;
222 struct rb_node *parent = NULL;
223
224 mutex_lock(&domain_mutex);
225 while (*p) {
226 struct msm_iova_data *tmp;
227 parent = *p;
228
229 tmp = rb_entry(parent, struct msm_iova_data, node);
230
231 if (node->domain_num < tmp->domain_num)
232 p = &(*p)->rb_left;
233 else if (node->domain_num > tmp->domain_num)
234 p = &(*p)->rb_right;
235 else
236 BUG();
237 }
238 rb_link_node(&node->node, parent, p);
239 rb_insert_color(&node->node, root);
240 mutex_unlock(&domain_mutex);
241 return 0;
242}
243
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700244struct iommu_domain *msm_get_iommu_domain(int domain_num)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245{
Laura Abbottd01221b2012-05-16 17:52:49 -0700246 struct msm_iova_data *data;
247
248 data = find_domain(domain_num);
249
250 if (data)
251 return data->domain;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700252 else
253 return NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700254}
255
Laura Abbottd01221b2012-05-16 17:52:49 -0700256int msm_allocate_iova_address(unsigned int iommu_domain,
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700257 unsigned int partition_no,
258 unsigned long size,
Laura Abbottd01221b2012-05-16 17:52:49 -0700259 unsigned long align,
260 unsigned long *iova)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700261{
Laura Abbottd01221b2012-05-16 17:52:49 -0700262 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700263 struct mem_pool *pool;
Laura Abbottd01221b2012-05-16 17:52:49 -0700264 unsigned long va;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700265
Laura Abbottd01221b2012-05-16 17:52:49 -0700266 data = find_domain(iommu_domain);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700267
Laura Abbottd01221b2012-05-16 17:52:49 -0700268 if (!data)
269 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700270
Laura Abbottd01221b2012-05-16 17:52:49 -0700271 if (partition_no >= data->npools)
272 return -EINVAL;
273
274 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700275
276 if (!pool->gpool)
Laura Abbottd01221b2012-05-16 17:52:49 -0700277 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700278
Laura Abbottd01221b2012-05-16 17:52:49 -0700279 va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
280 if (va) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700281 pool->free -= size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700282 /* Offset because genpool can't handle 0 addresses */
283 if (pool->paddr == 0)
284 va -= SZ_4K;
285 *iova = va;
286 return 0;
287 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700288
Laura Abbottd01221b2012-05-16 17:52:49 -0700289 return -ENOMEM;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700290}
291
292void msm_free_iova_address(unsigned long iova,
Laura Abbottd01221b2012-05-16 17:52:49 -0700293 unsigned int iommu_domain,
294 unsigned int partition_no,
295 unsigned long size)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700296{
Laura Abbottd01221b2012-05-16 17:52:49 -0700297 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700298 struct mem_pool *pool;
299
Laura Abbottd01221b2012-05-16 17:52:49 -0700300 data = find_domain(iommu_domain);
301
302 if (!data) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700303 WARN(1, "Invalid domain %d\n", iommu_domain);
304 return;
305 }
306
Laura Abbottd01221b2012-05-16 17:52:49 -0700307 if (partition_no >= data->npools) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700308 WARN(1, "Invalid partition %d for domain %d\n",
309 partition_no, iommu_domain);
310 return;
311 }
312
Laura Abbottd01221b2012-05-16 17:52:49 -0700313 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700314
315 if (!pool)
316 return;
317
318 pool->free += size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700319
320 /* Offset because genpool can't handle 0 addresses */
321 if (pool->paddr == 0)
322 iova += SZ_4K;
323
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700324 gen_pool_free(pool->gpool, iova, size);
325}
326
Laura Abbottd01221b2012-05-16 17:52:49 -0700327int msm_register_domain(struct msm_iova_layout *layout)
328{
329 int i;
330 struct msm_iova_data *data;
331 struct mem_pool *pools;
332
333 if (!layout)
334 return -EINVAL;
335
336 data = kmalloc(sizeof(*data), GFP_KERNEL);
337
338 if (!data)
339 return -ENOMEM;
340
341 pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
342 GFP_KERNEL);
343
344 if (!pools)
345 goto out;
346
347 for (i = 0; i < layout->npartitions; i++) {
348 if (layout->partitions[i].size == 0)
349 continue;
350
351 pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
352
353 if (!pools[i].gpool)
354 continue;
355
356 pools[i].paddr = layout->partitions[i].start;
357 pools[i].size = layout->partitions[i].size;
358
359 /*
360 * genalloc can't handle a pool starting at address 0.
361 * For now, solve this problem by offsetting the value
362 * put in by 4k.
363 * gen pool address = actual address + 4k
364 */
365 if (pools[i].paddr == 0)
366 layout->partitions[i].start += SZ_4K;
367
368 if (gen_pool_add(pools[i].gpool,
369 layout->partitions[i].start,
370 layout->partitions[i].size, -1)) {
371 gen_pool_destroy(pools[i].gpool);
372 pools[i].gpool = NULL;
373 continue;
374 }
375 }
376
377 data->pools = pools;
378 data->npools = layout->npartitions;
379 data->domain_num = atomic_inc_return(&domain_nums);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700380 data->domain = iommu_domain_alloc(&platform_bus_type,
381 layout->domain_flags);
Laura Abbottd01221b2012-05-16 17:52:49 -0700382
383 add_domain(data);
384
385 return data->domain_num;
386
387out:
388 kfree(data);
389
390 return -EINVAL;
391}
392
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700393int msm_use_iommu()
394{
Steve Mucklef132c6c2012-06-06 18:30:57 -0700395 return iommu_present(&platform_bus_type);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700396}
397
Laura Abbott0577d7b2012-04-17 11:14:30 -0700398static int __init iommu_domain_probe(struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700399{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700400 struct iommu_domains_pdata *p = pdev->dev.platform_data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700401 int i, j;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700402
Laura Abbott0577d7b2012-04-17 11:14:30 -0700403 if (!p)
404 return -ENODEV;
405
Laura Abbottd01221b2012-05-16 17:52:49 -0700406 for (i = 0; i < p->ndomains; i++) {
407 struct msm_iova_layout l;
408 struct msm_iova_partition *part;
409 struct msm_iommu_domain *domains;
Laura Abbott0577d7b2012-04-17 11:14:30 -0700410
Laura Abbottd01221b2012-05-16 17:52:49 -0700411 domains = p->domains;
412 l.npartitions = domains[i].npools;
413 part = kmalloc(
414 sizeof(struct msm_iova_partition) * l.npartitions,
415 GFP_KERNEL);
416
417 if (!part) {
418 pr_info("%s: could not allocate space for domain %d",
419 __func__, i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700420 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700421 }
Laura Abbottd01221b2012-05-16 17:52:49 -0700422
423 for (j = 0; j < l.npartitions; j++) {
424 part[j].start = p->domains[i].iova_pools[j].paddr;
425 part[j].size = p->domains[i].iova_pools[j].size;
426 }
427
428 l.partitions = part;
429
430 msm_register_domain(&l);
431
432 kfree(part);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700433 }
434
Laura Abbott0577d7b2012-04-17 11:14:30 -0700435 for (i = 0; i < p->nnames; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700436 struct device *ctx = msm_iommu_get_ctx(
Laura Abbott0577d7b2012-04-17 11:14:30 -0700437 p->domain_names[i].name);
Laura Abbottd01221b2012-05-16 17:52:49 -0700438 struct iommu_domain *domain;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700439
440 if (!ctx)
441 continue;
442
Laura Abbottd01221b2012-05-16 17:52:49 -0700443 domain = msm_get_iommu_domain(p->domain_names[i].domain);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700444
Laura Abbottd01221b2012-05-16 17:52:49 -0700445 if (!domain)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700446 continue;
447
Laura Abbottd01221b2012-05-16 17:52:49 -0700448 if (iommu_attach_device(domain, ctx)) {
449 WARN(1, "%s: could not attach domain %p to context %s."
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450 " iommu programming will not occur.\n",
Laura Abbottd01221b2012-05-16 17:52:49 -0700451 __func__, domain,
Laura Abbott0577d7b2012-04-17 11:14:30 -0700452 p->domain_names[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700453 continue;
454 }
455 }
456
457 return 0;
458}
Laura Abbott0577d7b2012-04-17 11:14:30 -0700459
/*
 * Platform driver shell; the probe routine is supplied separately via
 * platform_driver_probe() below, which is why .probe is not set here.
 */
static struct platform_driver iommu_domain_driver = {
	.driver         = {
		.name = "iommu_domains",
		.owner = THIS_MODULE
	},
};

/*
 * Register the driver and run iommu_domain_probe() for an already-
 * registered "iommu_domains" platform device (probe is __init, so
 * platform_driver_probe is required rather than platform_driver_register).
 */
static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);