/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_iommu_priv.h>
#include <mach/socinfo.h>

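/*
 * Per-domain bookkeeping: one node in the domain rbtree, the iova
 * partition pools backing the domain, and the number assigned to it
 * by msm_register_domain().
 */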
struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

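/* Attach a human-readable client name to the domain's private data. */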
void msm_iommu_set_client_name(struct iommu_domain *domain, char const *name)
{
	struct msm_iommu_priv *priv = domain->priv;
	priv->client_name = name;
}

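/* Nonzero if an IOMMU is present on the platform bus. */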
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

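/*
 * Mapping sizes accepted by msm_iommu_map_extra(); these correspond to
 * the page sizes the MSM IOMMU page tables are expected to support.
 */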
bool msm_iommu_page_size_is_supported(unsigned long page_size)
{
	return page_size == SZ_4K
		|| page_size == SZ_64K
		|| page_size == SZ_1M
		|| page_size == SZ_16M;
}

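/*
 * Map "extra" padding pages at start_iova. Every page aliases the single
 * physical page at phy_addr and is mapped read-only (IOMMU_WRITE is
 * cleared). For supported page sizes the whole range is mapped with one
 * iommu_map_range() call over a temporary scatterlist; otherwise it falls
 * back to mapping page_size chunks one at a time, unwinding on failure.
 */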
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				phys_addr_t phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;

	/*
	 * The extra "padding" should never be written to; map it
	 * read-only.
	 */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, prot);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		vfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
					prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
	return ret;
out:
	/* Unwind the mappings made before the failure. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret;
}

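/* Tear down a mapping created by msm_iommu_map_extra(), page by page. */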
void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

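/*
 * Map a physically contiguous region at iova using a single-entry
 * scatterlist. The mapping is read/write, and cacheable if requested.
 */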
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				phys_addr_t phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	vfree(sglist);
err1:
	return ret;
}

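/*
 * Allocate an iova range from the given domain/partition and map the
 * contiguous buffer at phys into it. When no IOMMU is present the
 * physical address is handed back unchanged so clients can use it
 * directly.
 */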
int msm_iommu_map_contig_buffer(phys_addr_t phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	if (!msm_use_iommu()) {
		*iova_val = phys;
		return 0;
	}

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
						&iova);
	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);
	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}
EXPORT_SYMBOL(msm_iommu_map_contig_buffer);

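/* Undo msm_iommu_map_contig_buffer(): unmap the range and free the iova. */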
void msm_iommu_unmap_contig_buffer(unsigned long iova,
					unsigned int domain_no,
					unsigned int partition_no,
					unsigned long size)
{
	if (!msm_use_iommu())
		return;

	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
EXPORT_SYMBOL(msm_iommu_unmap_contig_buffer);

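/*
 * Look up a registered domain by number in the rbtree. Returns the
 * msm_iova_data node, or NULL if the domain was never registered.
 */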
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

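/* Insert a new domain into the rbtree, keyed by domain_num. */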
static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

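/* Return the iommu_domain for a domain number, or NULL if unknown. */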
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL(msm_get_iommu_domain);

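/* Reverse lookup: find the domain number for an iommu_domain pointer. */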
int msm_find_domain_no(const struct iommu_domain *domain)
{
	struct rb_root *root = &domain_root;
	struct rb_node *n;
	struct msm_iova_data *node;
	int domain_num = -EINVAL;

	mutex_lock(&domain_mutex);

	for (n = rb_first(root); n; n = rb_next(n)) {
		node = rb_entry(n, struct msm_iova_data, node);
		if (node->domain == domain) {
			domain_num = node->domain_num;
			break;
		}
	}
	mutex_unlock(&domain_mutex);
	return domain_num;
}
EXPORT_SYMBOL(msm_find_domain_no);

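/*
 * Carve an aligned iova range of the given size out of the partition's
 * genpool. Pools that start at physical address 0 are stored with a 4K
 * offset (genalloc cannot return 0), which is subtracted again here.
 */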
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);
	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

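/*
 * Return an iova range to its partition pool, re-applying the 4K
 * offset used for pools that start at address 0.
 */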
void msm_free_iova_address(unsigned long iova,
				unsigned int iommu_domain,
				unsigned int partition_no,
				unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);
	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];
	if (!pool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

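/*
 * Create a domain from an iova layout: build a genpool per partition,
 * allocate the iommu_domain (on the secure bus for secure layouts),
 * assign the next domain number and insert it into the rbtree. Returns
 * the domain number, or a negative errno on failure.
 */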
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;
	struct bus_type *bus;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	pools = kmalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);
	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);
		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
				layout->partitions[i].start,
				layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	bus = layout->is_secure == MSM_IOMMU_DOMAIN_SECURE ?
					&msm_iommu_sec_bus_type :
					&platform_bus_type;

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(bus, layout->domain_flags);
	if (!data->domain)
		goto out;

	msm_iommu_set_client_name(data->domain, layout->client_name);

	add_domain(data);

	return data->domain_num;

out:
	/* pools is NULL on the first failure path; kfree(NULL) is a no-op. */
	kfree(pools);
	kfree(data);

	return -EINVAL;
}
EXPORT_SYMBOL(msm_register_domain);

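/*
 * Walk the "qcom,iommu-contexts" phandles of a device-tree node, look
 * up each context device by its label and add it to the iommu group.
 */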
static int find_and_add_contexts(struct iommu_group *group,
				 const struct device_node *node,
				 unsigned int num_contexts)
{
	unsigned int i;
	struct device *ctx;
	const char *name;
	struct device_node *ctx_node;
	int ret_val = 0;

	for (i = 0; i < num_contexts; ++i) {
		ctx_node = of_parse_phandle((struct device_node *) node,
					    "qcom,iommu-contexts", i);
		if (!ctx_node) {
			pr_err("Unable to parse phandle #%u\n", i);
			ret_val = -EINVAL;
			goto out;
		}
		if (of_property_read_string(ctx_node, "label", &name)) {
			pr_err("Could not find label property\n");
			ret_val = -EINVAL;
			goto out;
		}
		ctx = msm_iommu_get_ctx(name);
		if (!ctx) {
			pr_err("Unable to find context %s\n", name);
			ret_val = -EINVAL;
			goto out;
		}
		iommu_group_add_device(group, ctx);
	}
out:
	return ret_val;
}

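/*
 * Build an iova layout for a device-tree node (from
 * "qcom,virtual-addr-pool" if present, else a single partition spanning
 * the whole 32-bit space), register the domain and attach it to the
 * group as iommu data.
 */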
static int create_and_add_domain(struct iommu_group *group,
				 struct device_node const *node,
				 char const *name)
{
	int ret_val = 0;
	unsigned int i, j;
	struct msm_iova_layout l;
	struct msm_iova_partition *part = NULL;
	struct iommu_domain *domain = NULL;
	unsigned int *addr_array = NULL;
	unsigned int array_size;
	int domain_no;
	int secure_domain;
	int l2_redirect;

	if (of_get_property(node, "qcom,virtual-addr-pool", &array_size)) {
		/* Each partition is a (start, size) pair of u32 cells. */
		l.npartitions = array_size / sizeof(unsigned int) / 2;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		addr_array = kmalloc(array_size, GFP_KERNEL);
		if (!addr_array) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto free_mem;
		}

		ret_val = of_property_read_u32_array(node,
					"qcom,virtual-addr-pool",
					addr_array,
					array_size / sizeof(unsigned int));
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_mem;
		}

		for (i = 0, j = 0; j < l.npartitions * 2; i++, j += 2) {
			part[i].start = addr_array[j];
			part[i].size = addr_array[j + 1];
		}
	} else {
		l.npartitions = 1;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition\n",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		part[0].start = 0x0;
		part[0].size = 0xFFFFFFFF;
	}

	l.client_name = name;
	l.partitions = part;

	secure_domain = of_property_read_bool(node, "qcom,secure-domain");
	l.is_secure = (secure_domain) ? MSM_IOMMU_DOMAIN_SECURE : 0;

	l2_redirect = of_property_read_bool(node, "qcom,l2-redirect");
	l.domain_flags = (l2_redirect) ? MSM_IOMMU_DOMAIN_PT_CACHEABLE : 0;

	domain_no = msm_register_domain(&l);
	if (domain_no >= 0)
		domain = msm_get_iommu_domain(domain_no);
	else
		ret_val = domain_no;

	iommu_group_set_iommudata(group, domain, NULL);

free_mem:
	kfree(addr_array);
	kfree(part);
out:
	return ret_val;
}

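/*
 * Create an iommu group, its contexts and its domain for every child
 * of the "qcom,iommu-domains" node.
 */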
static int iommu_domain_parse_dt(const struct device_node *dt_node)
{
	struct device_node *node;
	int sz;
	unsigned int num_contexts;
	int ret_val = 0;
	struct iommu_group *group = NULL;
	const char *name;

	for_each_child_of_node(dt_node, node) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			ret_val = PTR_ERR(group);
			goto out;
		}
		if (of_property_read_string(node, "label", &name)) {
			ret_val = -EINVAL;
			goto free_group;
		}
		iommu_group_set_name(group, name);

		if (!of_get_property(node, "qcom,iommu-contexts", &sz)) {
			pr_err("Could not find qcom,iommu-contexts property\n");
			ret_val = -EINVAL;
			goto free_group;
		}
		num_contexts = sz / sizeof(unsigned int);

		ret_val = find_and_add_contexts(group, node, num_contexts);
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_group;
		}
		ret_val = create_and_add_domain(group, node, name);
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_group;
		}
	}
free_group:
	/* No iommu_group_free() function */
out:
	return ret_val;
}

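/*
 * Probe path: device-tree platforms are handled by
 * iommu_domain_parse_dt(); otherwise domains and context attachments
 * are taken from legacy platform data.
 */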
static int iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!msm_use_iommu())
		return -ENODEV;

	if (pdev->dev.of_node)
		return iommu_domain_parse_dt(pdev->dev.of_node);
	else if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		struct msm_iova_layout l;
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		/*
		 * Zero the layout so flags, client name and secure mode
		 * default to 0 for platform-data domains.
		 */
		memset(&l, 0, sizeof(l));

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_info("%s: could not allocate space for domain %d",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);
		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s. iommu programming will not occur.\n",
				__func__, domain, p->domain_names[i].name);
			continue;
		}
	}
	return 0;
}

static int __devexit iommu_domain_exit(struct platform_device *pdev)
{
	return 0;
}

static struct of_device_id msm_iommu_domain_match_table[] = {
	{ .name = "qcom,iommu-domains", },
	{}
};

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.of_match_table = msm_iommu_domain_match_table,
		.owner = THIS_MODULE,
	},
	.probe = iommu_domain_probe,
	.remove = __devexit_p(iommu_domain_exit),
};

static int __init msm_subsystem_iommu_init(void)
{
	int ret;

	ret = platform_driver_register(&iommu_domain_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU domain driver\n");
	return ret;
}

static void __exit msm_subsystem_iommu_exit(void)
{
	platform_driver_unregister(&iommu_domain_driver);
}

device_initcall(msm_subsystem_iommu_init);
module_exit(msm_subsystem_iommu_exit);