/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/idr.h>
#include <linux/scatterlist.h>
#include <linux/genalloc.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/msm_iommu_priv.h>
#include <mach/socinfo.h>

struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

struct msm_iommu_data_entry {
	struct list_head list;
	void *data;
};

static struct rb_root domain_root;
DEFINE_MUTEX(domain_mutex);
static DEFINE_IDA(domain_nums);

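/*
 * Tag a domain with a human-readable client name. Only the pointer is
 * stored, so @name must outlive the domain.
 */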
void msm_iommu_set_client_name(struct iommu_domain *domain, char const *name)
{
	struct msm_iommu_priv *priv = domain->priv;
	priv->client_name = name;
}

int msm_use_iommu()
{
	return iommu_present(&platform_bus_type);
}

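/*
 * Mapping sizes that can be expressed as a single entry in the MSM
 * IOMMU page tables. msm_iommu_map_extra() takes the scatterlist fast
 * path only for these sizes.
 */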
bool msm_iommu_page_size_is_supported(unsigned long page_size)
{
	return page_size == SZ_4K
		|| page_size == SZ_64K
		|| page_size == SZ_1M
		|| page_size == SZ_16M;
}

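/*
 * Map @size bytes of "guard" padding at @start_iova. Every IOMMU page in
 * the range is backed by the same dummy physical page at @phy_addr and is
 * mapped read-only, so stray device reads are harmless and stray writes
 * fault.
 */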
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				phys_addr_t phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;
	/* The extra "padding" should never be written to. Map it
	 * read-only. */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, prot);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		vfree(sglist);
	} else {
		unsigned long order = get_order(page_size);
		unsigned long aligned_size = ALIGN(size, page_size);
		unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);

		for (i = 0; i < nrpages; i++) {
			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
					prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, start_iova, domain, ret);
				ret = -EAGAIN;
				goto out;
			}
			temp_iova += page_size;
		}
	}
	return ret;
out:
	/* Roll back the mappings created before the failure. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret;
}

void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

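/*
 * Map a physically contiguous region into @domain by wrapping it in a
 * single-entry scatterlist and handing that to iommu_map_range().
 */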
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				phys_addr_t phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= cached ? IOMMU_CACHE : 0;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	vfree(sglist);
err1:
	return ret;
}

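/*
 * Map a physically contiguous buffer into an IOMMU domain and return the
 * allocated IOVA through @iova_val. On targets without an IOMMU this
 * degenerates to *iova_val = phys. A minimal usage sketch (the domain and
 * partition numbers here are hypothetical; real ones come from
 * msm_register_domain() or the board's iommu-domains setup):
 *
 *	unsigned long iova;
 *	int ret;
 *
 *	ret = msm_iommu_map_contig_buffer(phys, domain_no, 0,
 *					  SZ_1M, SZ_4K, 0, &iova);
 *	if (!ret)
 *		msm_iommu_unmap_contig_buffer(iova, domain_no, 0, SZ_1M);
 */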
int msm_iommu_map_contig_buffer(phys_addr_t phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;
	struct iommu_domain *domain;

	if (size & (align - 1))
		return -EINVAL;

	if (!msm_use_iommu()) {
		*iova_val = phys;
		return 0;
	}

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
					&iova);

	if (ret)
		return -ENOMEM;

	domain = msm_get_iommu_domain(domain_no);
	if (!domain) {
		pr_err("%s: Could not find domain %u. Unable to map\n",
			__func__, domain_no);
		msm_free_iova_address(iova, domain_no, partition_no, size);
		return -EINVAL;
	}
	ret = msm_iommu_map_iova_phys(domain, iova, phys, size, cached);

	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}
EXPORT_SYMBOL(msm_iommu_map_contig_buffer);

void msm_iommu_unmap_contig_buffer(unsigned long iova,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size)
{
	struct iommu_domain *domain;

	if (!msm_use_iommu())
		return;

	domain = msm_get_iommu_domain(domain_no);
	if (domain) {
		iommu_unmap_range(domain, iova, size);
	} else {
		pr_err("%s: Could not find domain %u. Unable to unmap\n",
			__func__, domain_no);
	}
	msm_free_iova_address(iova, domain_no, partition_no, size);
}
EXPORT_SYMBOL(msm_iommu_unmap_contig_buffer);

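/*
 * Registered domains live in an rb-tree keyed by domain number; the
 * helpers below look nodes up by number or by iommu_domain pointer.
 */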
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p;

	mutex_lock(&domain_mutex);
	p = root->rb_node;
	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

static int remove_domain(struct iommu_domain *domain)
{
	struct rb_root *root = &domain_root;
	struct rb_node *n;
	struct msm_iova_data *node;
	int ret = -EINVAL;

	mutex_lock(&domain_mutex);

	for (n = rb_first(root); n; n = rb_next(n)) {
		node = rb_entry(n, struct msm_iova_data, node);
		if (node->domain == domain) {
			rb_erase(&node->node, &domain_root);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&domain_mutex);
	return ret;
}

struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL(msm_get_iommu_domain);

int msm_find_domain_no(const struct iommu_domain *domain)
{
	struct rb_root *root = &domain_root;
	struct rb_node *n;
	struct msm_iova_data *node;
	int domain_num = -EINVAL;

	mutex_lock(&domain_mutex);

	for (n = rb_first(root); n; n = rb_next(n)) {
		node = rb_entry(n, struct msm_iova_data, node);
		if (node->domain == domain) {
			domain_num = node->domain_num;
			break;
		}
	}
	mutex_unlock(&domain_mutex);
	return domain_num;
}
EXPORT_SYMBOL(msm_find_domain_no);

static struct msm_iova_data *msm_domain_to_iova_data(struct iommu_domain
						     const *domain)
{
	struct rb_root *root = &domain_root;
	struct rb_node *n;
	struct msm_iova_data *node;
	struct msm_iova_data *iova_data = ERR_PTR(-EINVAL);

	mutex_lock(&domain_mutex);

	for (n = rb_first(root); n; n = rb_next(n)) {
		node = rb_entry(n, struct msm_iova_data, node);
		if (node->domain == domain) {
			iova_data = node;
			break;
		}
	}
	mutex_unlock(&domain_mutex);
	return iova_data;
}

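/*
 * Carve an IOVA range of @size bytes out of the given partition's
 * genpool. genalloc cannot represent address 0, so pools that start at
 * address 0 are registered shifted up by 4K and the offset is
 * subtracted again here before the address is returned.
 */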
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	mutex_lock(&pool->pool_mutex);
	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	mutex_unlock(&pool->pool_mutex);
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

void msm_free_iova_address(unsigned long iova,
				unsigned int iommu_domain,
				unsigned int partition_no,
				unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	mutex_lock(&pool->pool_mutex);
	gen_pool_free(pool->gpool, iova, size);
	mutex_unlock(&pool->pool_mutex);
}

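/*
 * Create an iommu_domain plus its IOVA pools from an msm_iova_layout and
 * enter it into the registry; returns the new domain number (>= 0) or
 * -EINVAL on failure. A minimal sketch of a caller (the partition bounds
 * and client name are hypothetical):
 *
 *	struct msm_iova_partition part = {
 *		.start = SZ_4K,
 *		.size = SZ_256M,
 *	};
 *	struct msm_iova_layout layout = {
 *		.partitions = &part,
 *		.npartitions = 1,
 *		.client_name = "example_client",
 *		.domain_flags = 0,
 *	};
 *	int domain_num = msm_register_domain(&layout);
 *	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
 */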
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;
	struct bus_type *bus;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	pools = kzalloc(sizeof(struct mem_pool) * layout->npartitions,
			GFP_KERNEL);

	if (!pools)
		goto free_data;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;
		mutex_init(&pools[i].pool_mutex);

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
				 layout->partitions[i].start,
				 layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	bus = layout->is_secure == MSM_IOMMU_DOMAIN_SECURE ?
					&msm_iommu_sec_bus_type :
					&platform_bus_type;

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = ida_simple_get(&domain_nums, 0, 0, GFP_KERNEL);
	if (data->domain_num < 0)
		goto free_pools;

	data->domain = iommu_domain_alloc(bus, layout->domain_flags);
	if (!data->domain)
		goto free_domain_num;

	msm_iommu_set_client_name(data->domain, layout->client_name);

	add_domain(data);

	return data->domain_num;

free_domain_num:
	ida_simple_remove(&domain_nums, data->domain_num);

free_pools:
	for (i = 0; i < layout->npartitions; i++) {
		if (pools[i].gpool)
			gen_pool_destroy(pools[i].gpool);
	}
	kfree(pools);
free_data:
	kfree(data);

	return -EINVAL;
}
EXPORT_SYMBOL(msm_register_domain);

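/*
 * Tear down a domain created by msm_register_domain(): drop it from the
 * registry, free the hardware domain, release its number and destroy its
 * IOVA pools. Any outstanding allocations from those pools must already
 * have been freed.
 */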
int msm_unregister_domain(struct iommu_domain *domain)
{
	unsigned int i;
	struct msm_iova_data *data = msm_domain_to_iova_data(domain);

	if (IS_ERR_OR_NULL(data)) {
		pr_err("%s: Could not find iova_data\n", __func__);
		return -EINVAL;
	}

	if (remove_domain(data->domain)) {
		pr_err("%s: Domain not found. Failed to remove domain\n",
			__func__);
	}

	iommu_domain_free(domain);

	ida_simple_remove(&domain_nums, data->domain_num);

	for (i = 0; i < data->npools; ++i) {
		/* Partitions that failed to set up leave a NULL gpool */
		if (data->pools[i].gpool)
			gen_pool_destroy(data->pools[i].gpool);
	}

	kfree(data->pools);
	kfree(data);
	return 0;
}
EXPORT_SYMBOL(msm_unregister_domain);

static int find_and_add_contexts(struct iommu_group *group,
				 const struct device_node *node,
				 unsigned int num_contexts)
{
	unsigned int i;
	struct device *ctx;
	const char *name;
	struct device_node *ctx_node;
	int ret_val = 0;

	for (i = 0; i < num_contexts; ++i) {
		ctx_node = of_parse_phandle((struct device_node *) node,
					    "qcom,iommu-contexts", i);
		if (!ctx_node) {
			pr_err("Unable to parse phandle #%u\n", i);
			ret_val = -EINVAL;
			goto out;
		}
		if (of_property_read_string(ctx_node, "label", &name)) {
			pr_err("Could not find label property\n");
			ret_val = -EINVAL;
			goto out;
		}
		ctx = msm_iommu_get_ctx(name);
		if (IS_ERR(ctx)) {
			ret_val = PTR_ERR(ctx);
			goto out;
		}

		ret_val = iommu_group_add_device(group, ctx);
		if (ret_val)
			goto out;
	}
out:
	return ret_val;
}

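/*
 * Build an msm_iova_layout for one iommu-domains child node and register
 * it. The optional qcom,virtual-addr-pool property is a flat list of
 * <start size> pairs, one per partition; without it the domain gets a
 * single partition spanning the whole 32-bit space. A hypothetical child
 * node might look like:
 *
 *	qcom,iommu-domain1 {
 *		label = "example_ns";
 *		qcom,iommu-contexts = <&example_ctx>;
 *		qcom,virtual-addr-pool = <0x1000 0xfffff000>;
 *	};
 */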
static int create_and_add_domain(struct iommu_group *group,
				 struct device_node const *node,
				 char const *name)
{
	int ret_val = 0;
	unsigned int i, j;
	struct msm_iova_layout l;
	struct msm_iova_partition *part = NULL;
	struct iommu_domain *domain = NULL;
	unsigned int *addr_array = NULL;
	unsigned int array_size;
	int domain_no;
	int secure_domain;
	int l2_redirect;

	if (of_get_property(node, "qcom,virtual-addr-pool", &array_size)) {
		/* Each partition is described by a <start size> pair */
		l.npartitions = array_size / sizeof(unsigned int) / 2;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		addr_array = kmalloc(array_size, GFP_KERNEL);
		if (!addr_array) {
			pr_err("%s: could not allocate space for partition",
				__func__);
			ret_val = -ENOMEM;
			goto free_mem;
		}

		ret_val = of_property_read_u32_array(node,
					"qcom,virtual-addr-pool",
					addr_array,
					array_size / sizeof(unsigned int));
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_mem;
		}

		for (i = 0, j = 0; j < l.npartitions * 2; i++, j += 2) {
			part[i].start = addr_array[j];
			part[i].size = addr_array[j + 1];
		}
	} else {
		l.npartitions = 1;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);
		if (!part) {
			pr_err("%s: could not allocate space for partition",
				__func__);
			ret_val = -ENOMEM;
			goto out;
		}
		part[0].start = 0x0;
		part[0].size = 0xFFFFFFFF;
	}

	l.client_name = name;
	l.partitions = part;

	secure_domain = of_property_read_bool(node, "qcom,secure-domain");
	l.is_secure = (secure_domain) ? MSM_IOMMU_DOMAIN_SECURE : 0;

	l2_redirect = of_property_read_bool(node, "qcom,l2-redirect");
	l.domain_flags = (l2_redirect) ? MSM_IOMMU_DOMAIN_PT_CACHEABLE : 0;

	domain_no = msm_register_domain(&l);
	if (domain_no >= 0)
		domain = msm_get_iommu_domain(domain_no);
	else
		ret_val = domain_no;

	iommu_group_set_iommudata(group, domain, NULL);

free_mem:
	kfree(addr_array);
	kfree(part);
out:
	return ret_val;
}

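/*
 * iommu_group_for_each_dev() callback: despite its name, this simply
 * collects each device in the group onto the list passed through @data.
 */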
static int __msm_group_get_domain(struct device *dev, void *data)
{
	struct msm_iommu_data_entry *list_entry;
	struct list_head *dev_list = data;
	int ret_val = 0;

	list_entry = kmalloc(sizeof(*list_entry), GFP_KERNEL);
	if (list_entry) {
		list_entry->data = dev;
		list_add(&list_entry->list, dev_list);
	} else {
		ret_val = -ENOMEM;
	}

	return ret_val;
}

static void __msm_iommu_group_remove_device(struct iommu_group *grp)
{
	struct msm_iommu_data_entry *tmp;
	struct msm_iommu_data_entry *list_entry;
	struct list_head dev_list;

	INIT_LIST_HEAD(&dev_list);
	iommu_group_for_each_dev(grp, &dev_list, __msm_group_get_domain);

	list_for_each_entry_safe(list_entry, tmp, &dev_list, list) {
		iommu_group_remove_device(list_entry->data);
		list_del(&list_entry->list);
		kfree(list_entry);
	}
}

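/*
 * Walk the children of the iommu-domains node. Each child yields one
 * iommu_group named by its label, populated with the contexts listed in
 * qcom,iommu-contexts, and bound to a freshly registered domain. On any
 * failure every group created so far is unwound again.
 */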
static int iommu_domain_parse_dt(const struct device_node *dt_node)
{
	struct device_node *node;
	int sz;
	unsigned int num_contexts;
	int ret_val = 0;
	struct iommu_group *group = NULL;
	const char *name;
	struct msm_iommu_data_entry *grp_list_entry;
	struct msm_iommu_data_entry *tmp;
	struct list_head iommu_group_list;
	INIT_LIST_HEAD(&iommu_group_list);

	for_each_child_of_node(dt_node, node) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			ret_val = PTR_ERR(group);
			group = NULL;
			goto free_group;
		}

		/* This is only needed to clean up memory if something fails */
		grp_list_entry = kmalloc(sizeof(*grp_list_entry),
					 GFP_KERNEL);
		if (grp_list_entry) {
			grp_list_entry->data = group;
			list_add(&grp_list_entry->list, &iommu_group_list);
		} else {
			ret_val = -ENOMEM;
			goto free_group;
		}

		if (of_property_read_string(node, "label", &name)) {
			ret_val = -EINVAL;
			goto free_group;
		}
		iommu_group_set_name(group, name);

		if (!of_get_property(node, "qcom,iommu-contexts", &sz)) {
			pr_err("Could not find qcom,iommu-contexts property\n");
			ret_val = -EINVAL;
			goto free_group;
		}
		num_contexts = sz / sizeof(unsigned int);

		ret_val = find_and_add_contexts(group, node, num_contexts);
		if (ret_val)
			goto free_group;

		ret_val = create_and_add_domain(group, node, name);
		if (ret_val) {
			ret_val = -EINVAL;
			goto free_group;
		}

		/* Remove reference to the group that is taken when the group
		 * is allocated. This will ensure that when all the devices in
		 * the group are removed the group will be released.
		 */
		iommu_group_put(group);
	}

	list_for_each_entry_safe(grp_list_entry, tmp, &iommu_group_list, list) {
		list_del(&grp_list_entry->list);
		kfree(grp_list_entry);
	}
	goto out;

free_group:
	list_for_each_entry_safe(grp_list_entry, tmp, &iommu_group_list, list) {
		struct iommu_domain *d;

		d = iommu_group_get_iommudata(grp_list_entry->data);
		if (d)
			msm_unregister_domain(d);

		__msm_iommu_group_remove_device(grp_list_entry->data);
		list_del(&grp_list_entry->list);
		kfree(grp_list_entry);
	}
	iommu_group_put(group);
out:
	return ret_val;
}

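/*
 * Probe path: device-tree targets are handled by iommu_domain_parse_dt();
 * legacy board files instead pass an iommu_domains_pdata describing the
 * domains, their IOVA pools, and the context-to-domain attachments.
 */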
static int iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!msm_use_iommu())
		return -ENODEV;

	if (pdev->dev.of_node)
		return iommu_domain_parse_dt(pdev->dev.of_node);
	else if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		/* Zero-init so client_name/is_secure/domain_flags default */
		struct msm_iova_layout l = {0};
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_info("%s: could not allocate space for domain %d",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s. iommu programming will not occur.\n",
				__func__, domain, p->domain_names[i].name);
			continue;
		}
	}
	return 0;
}

static int __devexit iommu_domain_exit(struct platform_device *pdev)
{
	return 0;
}

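/* Note: matches the device-tree node by name rather than by compatible */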
static struct of_device_id msm_iommu_domain_match_table[] = {
	{ .name = "qcom,iommu-domains", },
	{}
};

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.of_match_table = msm_iommu_domain_match_table,
		.owner = THIS_MODULE
	},
	.probe = iommu_domain_probe,
	.remove = __devexit_p(iommu_domain_exit),
};

static int __init msm_subsystem_iommu_init(void)
{
	int ret;
	ret = platform_driver_register(&iommu_domain_driver);
	if (ret != 0)
		pr_err("Failed to register IOMMU domain driver\n");
	return ret;
}

static void __exit msm_subsystem_iommu_exit(void)
{
	platform_driver_unregister(&iommu_domain_driver);
}

device_initcall(msm_subsystem_iommu_init);
module_exit(msm_subsystem_iommu_exit);