blob: 3acb6d865c8518e0a6773121a21a82c0bac4f389 [file] [log] [blame]
/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

Steve Mucklef132c6c2012-06-06 18:30:57 -070013#include <linux/module.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070014#include <linux/init.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070015#include <linux/iommu.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070016#include <linux/memory_alloc.h>
Laura Abbott0577d7b2012-04-17 11:14:30 -070017#include <linux/platform_device.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070018#include <linux/vmalloc.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070019#include <linux/rbtree.h>
20#include <linux/slab.h>
Olav Haugan16cdb412012-03-27 13:02:17 -070021#include <linux/vmalloc.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070022#include <asm/sizes.h>
23#include <asm/page.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <mach/iommu.h>
25#include <mach/iommu_domains.h>
Laura Abbott9f4a8e62011-08-29 19:08:07 -070026#include <mach/socinfo.h>
Laura Abbottd01221b2012-05-16 17:52:49 -070027#include <mach/msm_subsystem_map.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070028
Olav Haugan8726caf2012-05-10 15:11:35 -070029/* dummy 64K for overmapping */
30char iommu_dummy[2*SZ_64K-4];
Laura Abbotte956cce2011-10-25 13:33:20 -070031
Laura Abbottd01221b2012-05-16 17:52:49 -070032struct msm_iova_data {
33 struct rb_node node;
34 struct mem_pool *pools;
35 int npools;
36 struct iommu_domain *domain;
37 int domain_num;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038};
39
Laura Abbottd01221b2012-05-16 17:52:49 -070040static struct rb_root domain_root;
41DEFINE_MUTEX(domain_mutex);
42static atomic_t domain_nums = ATOMIC_INIT(-1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070043
Laura Abbott2030c1b2012-07-18 06:38:00 -070044int msm_use_iommu()
45{
46 return iommu_present(&platform_bus_type);
47}
48
Laura Abbotte956cce2011-10-25 13:33:20 -070049int msm_iommu_map_extra(struct iommu_domain *domain,
50 unsigned long start_iova,
51 unsigned long size,
Olav Haugan8726caf2012-05-10 15:11:35 -070052 unsigned long page_size,
Laura Abbotte956cce2011-10-25 13:33:20 -070053 int cached)
54{
Olav Haugan5e7befd2012-06-19 14:59:37 -070055 int ret = 0;
56 int i = 0;
Olav Haugan8726caf2012-05-10 15:11:35 -070057 unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
58 unsigned long temp_iova = start_iova;
Olav Haugan5e7befd2012-06-19 14:59:37 -070059 if (page_size == SZ_4K) {
60 struct scatterlist *sglist;
61 unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
62 struct page *dummy_page = phys_to_page(phy_addr);
Laura Abbotte956cce2011-10-25 13:33:20 -070063
Olav Haugan5e7befd2012-06-19 14:59:37 -070064 sglist = vmalloc(sizeof(*sglist) * nrpages);
65 if (!sglist) {
66 ret = -ENOMEM;
Olav Haugan8726caf2012-05-10 15:11:35 -070067 goto out;
68 }
Olav Haugan5e7befd2012-06-19 14:59:37 -070069
70 sg_init_table(sglist, nrpages);
71
72 for (i = 0; i < nrpages; i++)
73 sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
74
75 ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
76 if (ret) {
77 pr_err("%s: could not map extra %lx in domain %p\n",
78 __func__, start_iova, domain);
79 }
80
81 vfree(sglist);
82 } else {
83 unsigned long order = get_order(page_size);
84 unsigned long aligned_size = ALIGN(size, page_size);
85 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
86
87 for (i = 0; i < nrpages; i++) {
88 ret = iommu_map(domain, temp_iova, phy_addr, page_size,
89 cached);
90 if (ret) {
91 pr_err("%s: could not map %lx in domain %p, error: %d\n",
92 __func__, start_iova, domain, ret);
93 ret = -EAGAIN;
94 goto out;
95 }
96 temp_iova += page_size;
97 }
Laura Abbotte956cce2011-10-25 13:33:20 -070098 }
Olav Haugan5e7befd2012-06-19 14:59:37 -070099 return ret;
Olav Haugan8726caf2012-05-10 15:11:35 -0700100out:
101 for (; i > 0; --i) {
102 temp_iova -= page_size;
Steve Mucklef132c6c2012-06-06 18:30:57 -0700103 iommu_unmap(domain, start_iova, page_size);
Olav Haugan16cdb412012-03-27 13:02:17 -0700104 }
Olav Haugan5e7befd2012-06-19 14:59:37 -0700105 return ret;
Olav Haugan8726caf2012-05-10 15:11:35 -0700106}
Laura Abbotte956cce2011-10-25 13:33:20 -0700107
Olav Haugan8726caf2012-05-10 15:11:35 -0700108void msm_iommu_unmap_extra(struct iommu_domain *domain,
109 unsigned long start_iova,
110 unsigned long size,
111 unsigned long page_size)
112{
113 int i;
114 unsigned long order = get_order(page_size);
115 unsigned long aligned_size = ALIGN(size, page_size);
116 unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
117 unsigned long temp_iova = start_iova;
118
119 for (i = 0; i < nrpages; ++i) {
Steve Mucklef132c6c2012-06-06 18:30:57 -0700120 iommu_unmap(domain, temp_iova, page_size);
Olav Haugan8726caf2012-05-10 15:11:35 -0700121 temp_iova += page_size;
122 }
Laura Abbotte956cce2011-10-25 13:33:20 -0700123}
124
Laura Abbottd027fdb2012-04-17 16:22:24 -0700125static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
126 unsigned long iova,
127 unsigned long phys,
128 unsigned long size,
129 int cached)
130{
131 int ret;
132 struct scatterlist *sglist;
Laura Abbotte543cfc2012-06-07 17:51:53 -0700133 int prot = IOMMU_WRITE | IOMMU_READ;
134 prot |= cached ? IOMMU_CACHE : 0;
Laura Abbottd027fdb2012-04-17 16:22:24 -0700135
136 sglist = vmalloc(sizeof(*sglist));
137 if (!sglist) {
138 ret = -ENOMEM;
139 goto err1;
140 }
141
142 sg_init_table(sglist, 1);
143 sglist->length = size;
144 sglist->offset = 0;
145 sglist->dma_address = phys;
146
Laura Abbotte543cfc2012-06-07 17:51:53 -0700147 ret = iommu_map_range(domain, iova, sglist, size, prot);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700148 if (ret) {
149 pr_err("%s: could not map extra %lx in domain %p\n",
150 __func__, iova, domain);
151 }
152
153 vfree(sglist);
154err1:
155 return ret;
156
157}
158
159int msm_iommu_map_contig_buffer(unsigned long phys,
160 unsigned int domain_no,
161 unsigned int partition_no,
162 unsigned long size,
163 unsigned long align,
164 unsigned long cached,
165 unsigned long *iova_val)
166{
167 unsigned long iova;
168 int ret;
169
170 if (size & (align - 1))
171 return -EINVAL;
172
Laura Abbott2030c1b2012-07-18 06:38:00 -0700173 if (!msm_use_iommu()) {
174 *iova_val = phys;
175 return 0;
176 }
177
Laura Abbottd01221b2012-05-16 17:52:49 -0700178 ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
179 &iova);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700180
Laura Abbottd01221b2012-05-16 17:52:49 -0700181 if (ret)
Laura Abbottd027fdb2012-04-17 16:22:24 -0700182 return -ENOMEM;
183
184 ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
185 phys, size, cached);
186
187 if (ret)
188 msm_free_iova_address(iova, domain_no, partition_no, size);
189 else
190 *iova_val = iova;
191
192 return ret;
193}
Laura Abbott33b30be2012-07-09 09:48:54 -0700194EXPORT_SYMBOL(msm_iommu_map_contig_buffer);
Laura Abbottd027fdb2012-04-17 16:22:24 -0700195
196void msm_iommu_unmap_contig_buffer(unsigned long iova,
197 unsigned int domain_no,
198 unsigned int partition_no,
199 unsigned long size)
200{
Laura Abbott2030c1b2012-07-18 06:38:00 -0700201 if (!msm_use_iommu())
202 return;
203
Laura Abbottd027fdb2012-04-17 16:22:24 -0700204 iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
205 msm_free_iova_address(iova, domain_no, partition_no, size);
206}
Laura Abbott33b30be2012-07-09 09:48:54 -0700207EXPORT_SYMBOL(msm_iommu_unmap_contig_buffer);
Laura Abbotte956cce2011-10-25 13:33:20 -0700208
Laura Abbottd01221b2012-05-16 17:52:49 -0700209static struct msm_iova_data *find_domain(int domain_num)
210{
211 struct rb_root *root = &domain_root;
212 struct rb_node *p = root->rb_node;
213
214 mutex_lock(&domain_mutex);
215
216 while (p) {
217 struct msm_iova_data *node;
218
219 node = rb_entry(p, struct msm_iova_data, node);
220 if (domain_num < node->domain_num)
221 p = p->rb_left;
Laura Abbott723970d2012-06-05 15:01:16 -0700222 else if (domain_num > node->domain_num)
Laura Abbottd01221b2012-05-16 17:52:49 -0700223 p = p->rb_right;
224 else {
225 mutex_unlock(&domain_mutex);
226 return node;
227 }
228 }
229 mutex_unlock(&domain_mutex);
230 return NULL;
231}
232
233static int add_domain(struct msm_iova_data *node)
234{
235 struct rb_root *root = &domain_root;
236 struct rb_node **p = &root->rb_node;
237 struct rb_node *parent = NULL;
238
239 mutex_lock(&domain_mutex);
240 while (*p) {
241 struct msm_iova_data *tmp;
242 parent = *p;
243
244 tmp = rb_entry(parent, struct msm_iova_data, node);
245
246 if (node->domain_num < tmp->domain_num)
247 p = &(*p)->rb_left;
248 else if (node->domain_num > tmp->domain_num)
249 p = &(*p)->rb_right;
250 else
251 BUG();
252 }
253 rb_link_node(&node->node, parent, p);
254 rb_insert_color(&node->node, root);
255 mutex_unlock(&domain_mutex);
256 return 0;
257}
258
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700259struct iommu_domain *msm_get_iommu_domain(int domain_num)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260{
Laura Abbottd01221b2012-05-16 17:52:49 -0700261 struct msm_iova_data *data;
262
263 data = find_domain(domain_num);
264
265 if (data)
266 return data->domain;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700267 else
268 return NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269}
270
Laura Abbottd01221b2012-05-16 17:52:49 -0700271int msm_allocate_iova_address(unsigned int iommu_domain,
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700272 unsigned int partition_no,
273 unsigned long size,
Laura Abbottd01221b2012-05-16 17:52:49 -0700274 unsigned long align,
275 unsigned long *iova)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700276{
Laura Abbottd01221b2012-05-16 17:52:49 -0700277 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700278 struct mem_pool *pool;
Laura Abbottd01221b2012-05-16 17:52:49 -0700279 unsigned long va;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700280
Laura Abbottd01221b2012-05-16 17:52:49 -0700281 data = find_domain(iommu_domain);
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700282
Laura Abbottd01221b2012-05-16 17:52:49 -0700283 if (!data)
284 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700285
Laura Abbottd01221b2012-05-16 17:52:49 -0700286 if (partition_no >= data->npools)
287 return -EINVAL;
288
289 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700290
291 if (!pool->gpool)
Laura Abbottd01221b2012-05-16 17:52:49 -0700292 return -EINVAL;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700293
Laura Abbottd01221b2012-05-16 17:52:49 -0700294 va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
295 if (va) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700296 pool->free -= size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700297 /* Offset because genpool can't handle 0 addresses */
298 if (pool->paddr == 0)
299 va -= SZ_4K;
300 *iova = va;
301 return 0;
302 }
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700303
Laura Abbottd01221b2012-05-16 17:52:49 -0700304 return -ENOMEM;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700305}
306
307void msm_free_iova_address(unsigned long iova,
Laura Abbottd01221b2012-05-16 17:52:49 -0700308 unsigned int iommu_domain,
309 unsigned int partition_no,
310 unsigned long size)
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700311{
Laura Abbottd01221b2012-05-16 17:52:49 -0700312 struct msm_iova_data *data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700313 struct mem_pool *pool;
314
Laura Abbottd01221b2012-05-16 17:52:49 -0700315 data = find_domain(iommu_domain);
316
317 if (!data) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700318 WARN(1, "Invalid domain %d\n", iommu_domain);
319 return;
320 }
321
Laura Abbottd01221b2012-05-16 17:52:49 -0700322 if (partition_no >= data->npools) {
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700323 WARN(1, "Invalid partition %d for domain %d\n",
324 partition_no, iommu_domain);
325 return;
326 }
327
Laura Abbottd01221b2012-05-16 17:52:49 -0700328 pool = &data->pools[partition_no];
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700329
330 if (!pool)
331 return;
332
333 pool->free += size;
Laura Abbottd01221b2012-05-16 17:52:49 -0700334
335 /* Offset because genpool can't handle 0 addresses */
336 if (pool->paddr == 0)
337 iova += SZ_4K;
338
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700339 gen_pool_free(pool->gpool, iova, size);
340}
341
Laura Abbottd01221b2012-05-16 17:52:49 -0700342int msm_register_domain(struct msm_iova_layout *layout)
343{
344 int i;
345 struct msm_iova_data *data;
346 struct mem_pool *pools;
347
348 if (!layout)
349 return -EINVAL;
350
351 data = kmalloc(sizeof(*data), GFP_KERNEL);
352
353 if (!data)
354 return -ENOMEM;
355
356 pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
357 GFP_KERNEL);
358
359 if (!pools)
360 goto out;
361
362 for (i = 0; i < layout->npartitions; i++) {
363 if (layout->partitions[i].size == 0)
364 continue;
365
366 pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
367
368 if (!pools[i].gpool)
369 continue;
370
371 pools[i].paddr = layout->partitions[i].start;
372 pools[i].size = layout->partitions[i].size;
373
374 /*
375 * genalloc can't handle a pool starting at address 0.
376 * For now, solve this problem by offsetting the value
377 * put in by 4k.
378 * gen pool address = actual address + 4k
379 */
380 if (pools[i].paddr == 0)
381 layout->partitions[i].start += SZ_4K;
382
383 if (gen_pool_add(pools[i].gpool,
384 layout->partitions[i].start,
385 layout->partitions[i].size, -1)) {
386 gen_pool_destroy(pools[i].gpool);
387 pools[i].gpool = NULL;
388 continue;
389 }
390 }
391
392 data->pools = pools;
393 data->npools = layout->npartitions;
394 data->domain_num = atomic_inc_return(&domain_nums);
Steve Mucklef132c6c2012-06-06 18:30:57 -0700395 data->domain = iommu_domain_alloc(&platform_bus_type,
396 layout->domain_flags);
Laura Abbottd01221b2012-05-16 17:52:49 -0700397
398 add_domain(data);
399
400 return data->domain_num;
401
402out:
403 kfree(data);
404
405 return -EINVAL;
406}
Laura Abbott33b30be2012-07-09 09:48:54 -0700407EXPORT_SYMBOL(msm_register_domain);
Laura Abbottd01221b2012-05-16 17:52:49 -0700408
Laura Abbott0577d7b2012-04-17 11:14:30 -0700409static int __init iommu_domain_probe(struct platform_device *pdev)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700410{
Laura Abbott0577d7b2012-04-17 11:14:30 -0700411 struct iommu_domains_pdata *p = pdev->dev.platform_data;
Laura Abbott9f4a8e62011-08-29 19:08:07 -0700412 int i, j;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700413
Chintan Pandyace27e562012-07-06 23:07:57 +0530414 if (!msm_use_iommu())
415 return -ENODEV;
416
Laura Abbott0577d7b2012-04-17 11:14:30 -0700417 if (!p)
418 return -ENODEV;
419
Laura Abbottd01221b2012-05-16 17:52:49 -0700420 for (i = 0; i < p->ndomains; i++) {
421 struct msm_iova_layout l;
422 struct msm_iova_partition *part;
423 struct msm_iommu_domain *domains;
Laura Abbott0577d7b2012-04-17 11:14:30 -0700424
Laura Abbottd01221b2012-05-16 17:52:49 -0700425 domains = p->domains;
426 l.npartitions = domains[i].npools;
427 part = kmalloc(
428 sizeof(struct msm_iova_partition) * l.npartitions,
429 GFP_KERNEL);
430
431 if (!part) {
432 pr_info("%s: could not allocate space for domain %d",
433 __func__, i);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700434 continue;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700435 }
Laura Abbottd01221b2012-05-16 17:52:49 -0700436
437 for (j = 0; j < l.npartitions; j++) {
438 part[j].start = p->domains[i].iova_pools[j].paddr;
439 part[j].size = p->domains[i].iova_pools[j].size;
440 }
441
442 l.partitions = part;
443
444 msm_register_domain(&l);
445
446 kfree(part);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700447 }
448
Laura Abbott0577d7b2012-04-17 11:14:30 -0700449 for (i = 0; i < p->nnames; i++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700450 struct device *ctx = msm_iommu_get_ctx(
Laura Abbott0577d7b2012-04-17 11:14:30 -0700451 p->domain_names[i].name);
Laura Abbottd01221b2012-05-16 17:52:49 -0700452 struct iommu_domain *domain;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700453
454 if (!ctx)
455 continue;
456
Laura Abbottd01221b2012-05-16 17:52:49 -0700457 domain = msm_get_iommu_domain(p->domain_names[i].domain);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700458
Laura Abbottd01221b2012-05-16 17:52:49 -0700459 if (!domain)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700460 continue;
461
Laura Abbottd01221b2012-05-16 17:52:49 -0700462 if (iommu_attach_device(domain, ctx)) {
463 WARN(1, "%s: could not attach domain %p to context %s."
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700464 " iommu programming will not occur.\n",
Laura Abbottd01221b2012-05-16 17:52:49 -0700465 __func__, domain,
Laura Abbott0577d7b2012-04-17 11:14:30 -0700466 p->domain_names[i].name);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467 continue;
468 }
469 }
470
471 return 0;
472}
Laura Abbott0577d7b2012-04-17 11:14:30 -0700473
474static struct platform_driver iommu_domain_driver = {
475 .driver = {
476 .name = "iommu_domains",
477 .owner = THIS_MODULE
478 },
479};
480
481static int __init msm_subsystem_iommu_init(void)
482{
483 return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
484}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700485device_initcall(msm_subsystem_iommu_init);