/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>

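/*
 * Bookkeeping for one registered domain: a node in the domain_root
 * rbtree, keyed by domain_num, with one mem_pool (a genpool-backed IOVA
 * allocator) per partition of the domain's virtual address space.
 */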
struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

bool msm_iommu_page_size_is_supported(unsigned long page_size)
{
	return page_size == SZ_4K
		|| page_size == SZ_64K
		|| page_size == SZ_1M
		|| page_size == SZ_16M;
}

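/*
 * Map @size bytes of "extra" padding at @start_iova.  Every page in the
 * range is backed by the same dummy physical page at @phy_addr, and the
 * mapping is forced read-only since nothing should ever write to it.
 * Supported page sizes are mapped in one iommu_map_range() call via a
 * scatterlist; other sizes fall back to per-page iommu_map() calls,
 * unwinding on failure.
 */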
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;
	/* the extra "padding" should never be written to. map it
	 * read-only. */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, prot);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		vfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
					prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
	return ret;
out:
	/* Unwind any pages that were mapped before the failure. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret;
}

void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

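/*
 * Map a physically contiguous region at @iova through a single-entry
 * scatterlist.  The mapping is always readable and writable; @cached
 * only controls whether IOMMU_CACHE is added to the protection flags.
 */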
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				unsigned long phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	vfree(sglist);
err1:
	return ret;
}

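/*
 * Allocate an IOVA from the given domain/partition and map @size bytes
 * of physically contiguous memory starting at @phys into it.  @size must
 * be a multiple of @align.  Without an IOMMU the physical address is
 * returned in *@iova_val unchanged, so callers work either way.
 *
 * A minimal usage sketch (the buffer and the domain/partition numbers
 * are illustrative, not taken from this file):
 *
 *	unsigned long iova;
 *	int ret;
 *
 *	ret = msm_iommu_map_contig_buffer(buf_phys, my_domain_no, 0,
 *					  buf_size, SZ_4K, 0, &iova);
 *	if (!ret) {
 *		... program the peripheral with iova, later undo with ...
 *		msm_iommu_unmap_contig_buffer(iova, my_domain_no, 0,
 *					      buf_size);
 *	}
 */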
int msm_iommu_map_contig_buffer(unsigned long phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	if (!msm_use_iommu()) {
		*iova_val = phys;
		return 0;
	}

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
						&iova);

	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);

	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}
EXPORT_SYMBOL(msm_iommu_map_contig_buffer);

void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	if (!msm_use_iommu())
		return;

	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
EXPORT_SYMBOL(msm_iommu_unmap_contig_buffer);

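/*
 * Domain lookup and insertion in the domain_root rbtree, ordered by
 * domain_num.  Both helpers take domain_mutex internally, so callers
 * must not already hold it.
 */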
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL(msm_get_iommu_domain);

int msm_find_domain_no(const struct iommu_domain *domain)
{
	struct rb_root *root = &domain_root;
	struct rb_node *n;
	struct msm_iova_data *node;
	int domain_num = -EINVAL;

	mutex_lock(&domain_mutex);

	for (n = rb_first(root); n; n = rb_next(n)) {
		node = rb_entry(n, struct msm_iova_data, node);
		if (node->domain == domain) {
			domain_num = node->domain_num;
			break;
		}
	}
	mutex_unlock(&domain_mutex);
	return domain_num;
}
EXPORT_SYMBOL(msm_find_domain_no);

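/*
 * Carve an aligned range of @size bytes out of one partition of a
 * domain.  genalloc treats a returned address of 0 as failure, so pools
 * based at address 0 are stored with a 4K offset that is subtracted
 * again before the IOVA is handed back to the caller.
 */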
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

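/*
 * Return a range previously handed out by msm_allocate_iova_address()
 * to its pool, re-applying the 4K offset for pools based at address 0.
 */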
void msm_free_iova_address(unsigned long iova,
				unsigned int iommu_domain,
				unsigned int partition_no,
				unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

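/*
 * Register a new domain described by @layout: one genpool per non-empty
 * partition, plus an iommu_domain allocated on either the secure or the
 * normal platform bus.  Returns the new domain number, which callers
 * later pass to msm_get_iommu_domain() and the IOVA helpers above.
 */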
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;
	struct bus_type *bus;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);

	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
			layout->partitions[i].start,
			layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	bus = layout->is_secure == MSM_IOMMU_DOMAIN_SECURE ?
					&msm_iommu_sec_bus_type :
					&platform_bus_type;

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(bus, layout->domain_flags);

	add_domain(data);

	return data->domain_num;

out:
	kfree(data);

	return -ENOMEM;
}
EXPORT_SYMBOL(msm_register_domain);

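/*
 * Resolve each phandle in @node's "qcom,iommu-contexts" property to a
 * context device (looked up by its "label" property) and add that
 * device to @group.
 */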
static int find_and_add_contexts(struct iommu_group *group,
				 const struct device_node *node,
				 unsigned int num_contexts)
{
	unsigned int i;
	struct device *ctx;
	const char *name;
	struct device_node *ctx_node;
	int ret_val = 0;

	for (i = 0; i < num_contexts; ++i) {
		ctx_node = of_parse_phandle((struct device_node *) node,
					    "qcom,iommu-contexts", i);
		if (!ctx_node) {
			pr_err("Unable to parse phandle #%u\n", i);
			ret_val = -EINVAL;
			goto out;
		}
		if (of_property_read_string(ctx_node, "label", &name)) {
			pr_err("Could not find label property\n");
			ret_val = -EINVAL;
			goto out;
		}
		ctx = msm_iommu_get_ctx(name);
		if (!ctx) {
			pr_err("Unable to find context %s\n", name);
			ret_val = -EINVAL;
			goto out;
		}
		iommu_group_add_device(group, ctx);
	}
out:
	return ret_val;
}

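/*
 * Build an msm_iova_layout from the device tree @node, register the
 * resulting domain and attach it to @group as the group's iommu data.
 */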
static int create_and_add_domain(struct iommu_group *group,
				 const struct device_node *node)
{
	int ret_val = 0;
	unsigned int i, j;
	struct msm_iova_layout l;
	struct msm_iova_partition *part = NULL;
	struct iommu_domain *domain = NULL;
	unsigned int *addr_array = NULL;
	unsigned int array_size;
	int domain_no;
	int secure_domain;
	int l2_redirect;

	if (of_get_property(node, "qcom,virtual-addr-pool", &array_size)) {
		l.npartitions = array_size / sizeof(unsigned int) / 2;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		addr_array = kmalloc(array_size, GFP_KERNEL);
		if (!addr_array) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto free_mem;
		}

		ret_val = of_property_read_u32_array(node,
					"qcom,virtual-addr-pool",
					addr_array,
					array_size / sizeof(unsigned int));
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_mem;
		}

		/* The property is a flat list of (start, size) pairs. */
		for (i = 0, j = 0; j < l.npartitions * 2; i++, j += 2) {
			part[i].start = addr_array[j];
			part[i].size = addr_array[j + 1];
		}
	} else {
		/* No pool given; default to one partition covering the
		 * whole 32-bit space. */
		l.npartitions = 1;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		part[0].start = 0x0;
		part[0].size = 0xFFFFFFFF;
	}

	l.partitions = part;

	secure_domain = of_property_read_bool(node, "qcom,secure-domain");
	l.is_secure = (secure_domain) ? MSM_IOMMU_DOMAIN_SECURE : 0;

	l2_redirect = of_property_read_bool(node, "qcom,l2-redirect");
	l.domain_flags = (l2_redirect) ? MSM_IOMMU_DOMAIN_PT_CACHEABLE : 0;

	domain_no = msm_register_domain(&l);
	if (domain_no >= 0)
		domain = msm_get_iommu_domain(domain_no);
	else
		ret_val = domain_no;

	iommu_group_set_iommudata(group, domain, NULL);

free_mem:
	kfree(addr_array);
	kfree(part);
out:
	return ret_val;
}

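/*
 * Walk the children of the "qcom,iommu-domains" node; each child
 * describes one domain.  An illustrative child node (names and values
 * are hypothetical, based only on the properties read below):
 *
 *	example-domain {
 *		label = "example_domain";
 *		qcom,iommu-contexts = <&ctx_a &ctx_b>;
 *		qcom,virtual-addr-pool = <0x1000 0x0FFFF000>;
 *		qcom,secure-domain;
 *		qcom,l2-redirect;
 *	};
 *
 * "qcom,virtual-addr-pool" is read as (start, size) pairs, one pair per
 * IOVA partition; when absent, a single partition covering the whole
 * 32-bit space is used.
 */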
static int iommu_domain_parse_dt(const struct device_node *dt_node)
{
	struct device_node *node;
	int sz;
	unsigned int num_contexts;
	int ret_val = 0;
	struct iommu_group *group = NULL;
	const char *name;

	for_each_child_of_node(dt_node, node) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			ret_val = PTR_ERR(group);
			goto out;
		}
		if (of_property_read_string(node, "label", &name)) {
			ret_val = -EINVAL;
			goto free_group;
		}
		iommu_group_set_name(group, name);

		if (!of_get_property(node, "qcom,iommu-contexts", &sz)) {
			pr_err("Could not find qcom,iommu-contexts property\n");
			ret_val = -EINVAL;
			goto free_group;
		}
		num_contexts = sz / sizeof(unsigned int);

		ret_val = find_and_add_contexts(group, node, num_contexts);
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_group;
		}
		ret_val = create_and_add_domain(group, node);
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_group;
		}
	}
free_group:
	/* No iommu_group_free() function */
out:
	return ret_val;
}

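/*
 * Probe entry point.  Device tree configurations are handled entirely
 * by iommu_domain_parse_dt(); the legacy platform-data path below
 * registers each described domain and then attaches every named context
 * to its domain.
 */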
static int iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!msm_use_iommu())
		return -ENODEV;

	if (pdev->dev.of_node)
		return iommu_domain_parse_dt(pdev->dev.of_node);
	else if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		/*
		 * Legacy platform data carries no secure-domain or
		 * domain-flag information, so zero-initialize the layout
		 * rather than passing uninitialized fields along.
		 */
		struct msm_iova_layout l = {0};
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_info("%s: could not allocate space for domain %d\n",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
			p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s. iommu programming will not occur.\n",
				__func__, domain, p->domain_names[i].name);
			continue;
		}
	}
	return 0;
}

static int __devexit iommu_domain_exit(struct platform_device *pdev)
{
	return 0;
}

static struct of_device_id msm_iommu_domain_match_table[] = {
	{ .name = "qcom,iommu-domains", },
	{}
};

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.of_match_table = msm_iommu_domain_match_table,
		.owner = THIS_MODULE
	},
	.probe = iommu_domain_probe,
	.remove = __devexit_p(iommu_domain_exit),
};

static int __init msm_subsystem_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&iommu_domain_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU domain driver\n");
	return ret;
}

static void __exit msm_subsystem_iommu_exit(void)
{
	platform_driver_unregister(&iommu_domain_driver);
}

device_initcall(msm_subsystem_iommu_init);
module_exit(msm_subsystem_iommu_exit);