/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/iommu.h>
#include <linux/memory_alloc.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/page.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/socinfo.h>
#include <mach/msm_subsystem_map.h>

/*
 * Dummy buffer used as the backing for "extra" overmappings; it is
 * sized so that a 64K-aligned, 64K-sized region always fits inside it.
 */
char iommu_dummy[2*SZ_64K-4];

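/*
 * Each registered domain gets one of these nodes, kept in an rb-tree
 * keyed by domain_num. It ties the iommu_domain to the iova pools
 * ("partitions") that allocations are carved from.
 */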
struct msm_iova_data {
	struct rb_node node;
	struct mem_pool *pools;
	int npools;
	struct iommu_domain *domain;
	int domain_num;
};

static struct rb_root domain_root;
static DEFINE_MUTEX(domain_mutex);
static atomic_t domain_nums = ATOMIC_INIT(-1);

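/*
 * Map the range [start_iova, start_iova + size) in @page_size chunks,
 * all pointing at the dummy buffer above. Callers use this to over-map
 * padding around real buffers so that hardware accesses past the end of
 * a buffer do not fault.
 */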
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int i, ret_value = 0;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; i++) {
		int ret = iommu_map(domain, temp_iova, phy_addr, page_size,
				    cached);
		if (ret) {
			pr_err("%s: could not map %lx in domain %p, error: %d\n",
				__func__, temp_iova, domain, ret);
			ret_value = -EAGAIN;
			goto out;
		}
		temp_iova += page_size;
	}
	return ret_value;
out:
	/* Unwind whatever was mapped before the failure. */
	for (; i > 0; --i) {
		temp_iova -= page_size;
		iommu_unmap(domain, temp_iova, page_size);
	}
	return ret_value;
}

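/* Tear down a range previously set up with msm_iommu_map_extra(). */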
void msm_iommu_unmap_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size)
{
	int i;
	unsigned long order = get_order(page_size);
	unsigned long aligned_size = ALIGN(size, page_size);
	unsigned long nrpages = aligned_size >> (PAGE_SHIFT + order);
	unsigned long temp_iova = start_iova;

	for (i = 0; i < nrpages; ++i) {
		iommu_unmap(domain, temp_iova, page_size);
		temp_iova += page_size;
	}
}

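/*
 * Map @size bytes at physical address @phys to @iova by wrapping the
 * region in a one-entry scatterlist and handing it to iommu_map_range().
 */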
static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
				unsigned long iova,
				unsigned long phys,
				unsigned long size,
				int cached)
{
	int ret;
	struct scatterlist *sglist;
	int prot = IOMMU_WRITE | IOMMU_READ;

	prot |= cached ? IOMMU_CACHE : 0;

	sglist = vmalloc(sizeof(*sglist));
	if (!sglist) {
		ret = -ENOMEM;
		goto err1;
	}

	sg_init_table(sglist, 1);
	sglist->length = size;
	sglist->offset = 0;
	sglist->dma_address = phys;

	ret = iommu_map_range(domain, iova, sglist, size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, iova, domain);
	}

	vfree(sglist);
err1:
	return ret;
}

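/*
 * Carve an iova region out of (domain_no, partition_no) and map it to
 * the physically contiguous buffer at @phys; on success the chosen iova
 * is returned through @iova_val. A sketch of a typical call follows;
 * the domain and partition names are illustrative, not defined in this
 * file:
 *
 *	unsigned long iova;
 *	int ret = msm_iommu_map_contig_buffer(paddr, VIDEO_DOMAIN,
 *					      VIDEO_MAIN_POOL, buf_size,
 *					      SZ_4K, 0, &iova);
 *
 * On success (ret == 0) the device can reach the buffer at iova; undo
 * with msm_iommu_unmap_contig_buffer().
 */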
int msm_iommu_map_contig_buffer(unsigned long phys,
				unsigned int domain_no,
				unsigned int partition_no,
				unsigned long size,
				unsigned long align,
				unsigned long cached,
				unsigned long *iova_val)
{
	unsigned long iova;
	int ret;

	if (size & (align - 1))
		return -EINVAL;

	ret = msm_allocate_iova_address(domain_no, partition_no, size, align,
					&iova);

	if (ret)
		return -ENOMEM;

	ret = msm_iommu_map_iova_phys(msm_get_iommu_domain(domain_no), iova,
					phys, size, cached);

	if (ret)
		msm_free_iova_address(iova, domain_no, partition_no, size);
	else
		*iova_val = iova;

	return ret;
}

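/* Unmap and release a region set up by msm_iommu_map_contig_buffer(). */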
void msm_iommu_unmap_contig_buffer(unsigned long iova,
					unsigned int domain_no,
					unsigned int partition_no,
					unsigned long size)
{
	iommu_unmap_range(msm_get_iommu_domain(domain_no), iova, size);
	msm_free_iova_address(iova, domain_no, partition_no, size);
}

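/*
 * Binary-search the rb-tree of registered domains for @domain_num.
 * Returns the matching node, or NULL if none is registered.
 */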
static struct msm_iova_data *find_domain(int domain_num)
{
	struct rb_root *root = &domain_root;
	struct rb_node *p = root->rb_node;

	mutex_lock(&domain_mutex);

	while (p) {
		struct msm_iova_data *node;

		node = rb_entry(p, struct msm_iova_data, node);
		if (domain_num < node->domain_num)
			p = p->rb_left;
		else if (domain_num > node->domain_num)
			p = p->rb_right;
		else {
			mutex_unlock(&domain_mutex);
			return node;
		}
	}
	mutex_unlock(&domain_mutex);
	return NULL;
}

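/*
 * Insert a new domain node into the rb-tree. Domain numbers come from a
 * monotonic counter, so hitting a duplicate key is a bug.
 */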
static int add_domain(struct msm_iova_data *node)
{
	struct rb_root *root = &domain_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	mutex_lock(&domain_mutex);
	while (*p) {
		struct msm_iova_data *tmp;
		parent = *p;

		tmp = rb_entry(parent, struct msm_iova_data, node);

		if (node->domain_num < tmp->domain_num)
			p = &(*p)->rb_left;
		else if (node->domain_num > tmp->domain_num)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, root);
	mutex_unlock(&domain_mutex);
	return 0;
}

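/* Return the iommu_domain for @domain_num, or NULL if unregistered. */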
struct iommu_domain *msm_get_iommu_domain(int domain_num)
{
	struct msm_iova_data *data;

	data = find_domain(domain_num);

	if (data)
		return data->domain;
	else
		return NULL;
}

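/*
 * Allocate @size bytes of iova space, aligned to @align, from the given
 * partition of @iommu_domain. Note the 4K fixup: genalloc treats a
 * returned address of 0 as allocation failure, so pools that start at
 * physical address 0 are registered shifted up by 4K and shifted back
 * down here.
 */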
int msm_allocate_iova_address(unsigned int iommu_domain,
					unsigned int partition_no,
					unsigned long size,
					unsigned long align,
					unsigned long *iova)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;
	unsigned long va;

	data = find_domain(iommu_domain);

	if (!data)
		return -EINVAL;

	if (partition_no >= data->npools)
		return -EINVAL;

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return -EINVAL;

	va = gen_pool_alloc_aligned(pool->gpool, size, ilog2(align));
	if (va) {
		pool->free -= size;
		/* Offset because genpool can't handle 0 addresses */
		if (pool->paddr == 0)
			va -= SZ_4K;
		*iova = va;
		return 0;
	}

	return -ENOMEM;
}

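/* Return an iova region to its pool, re-applying the 4K offset fixup. */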
void msm_free_iova_address(unsigned long iova,
				unsigned int iommu_domain,
				unsigned int partition_no,
				unsigned long size)
{
	struct msm_iova_data *data;
	struct mem_pool *pool;

	data = find_domain(iommu_domain);

	if (!data) {
		WARN(1, "Invalid domain %d\n", iommu_domain);
		return;
	}

	if (partition_no >= data->npools) {
		WARN(1, "Invalid partition %d for domain %d\n",
			partition_no, iommu_domain);
		return;
	}

	pool = &data->pools[partition_no];

	if (!pool->gpool)
		return;

	pool->free += size;

	/* Offset because genpool can't handle 0 addresses */
	if (pool->paddr == 0)
		iova += SZ_4K;

	gen_pool_free(pool->gpool, iova, size);
}

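/*
 * Register a new domain described by @layout: build a genpool for each
 * nonempty partition, allocate a fresh domain number, and stash it all
 * in the rb-tree. Returns the domain number, or a negative errno. A
 * sketch of a registration (the start, size, and flag values are
 * illustrative only):
 *
 *	struct msm_iova_partition part = {
 *		.start = SZ_128K,
 *		.size = SZ_256M - SZ_128K,
 *	};
 *	struct msm_iova_layout layout = {
 *		.partitions = &part,
 *		.npartitions = 1,
 *		.domain_flags = 0,
 *	};
 *	int domain_num = msm_register_domain(&layout);
 */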
int msm_register_domain(struct msm_iova_layout *layout)
{
	int i;
	struct msm_iova_data *data;
	struct mem_pool *pools;

	if (!layout)
		return -EINVAL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* kcalloc so that skipped partitions leave a NULL gpool behind. */
	pools = kcalloc(layout->npartitions, sizeof(struct mem_pool),
			GFP_KERNEL);

	if (!pools)
		goto out;

	for (i = 0; i < layout->npartitions; i++) {
		if (layout->partitions[i].size == 0)
			continue;

		pools[i].gpool = gen_pool_create(PAGE_SHIFT, -1);

		if (!pools[i].gpool)
			continue;

		pools[i].paddr = layout->partitions[i].start;
		pools[i].size = layout->partitions[i].size;
		pools[i].free = layout->partitions[i].size;

		/*
		 * genalloc can't handle a pool starting at address 0.
		 * For now, solve this problem by offsetting the value
		 * put in by 4k.
		 * gen pool address = actual address + 4k
		 */
		if (pools[i].paddr == 0)
			layout->partitions[i].start += SZ_4K;

		if (gen_pool_add(pools[i].gpool,
			layout->partitions[i].start,
			layout->partitions[i].size, -1)) {
			gen_pool_destroy(pools[i].gpool);
			pools[i].gpool = NULL;
			continue;
		}
	}

	data->pools = pools;
	data->npools = layout->npartitions;
	data->domain_num = atomic_inc_return(&domain_nums);
	data->domain = iommu_domain_alloc(&platform_bus_type,
					  layout->domain_flags);
	if (!data->domain)
		goto out_free_pools;

	add_domain(data);

	return data->domain_num;

out_free_pools:
	for (i = 0; i < layout->npartitions; i++)
		if (pools[i].gpool)
			gen_pool_destroy(pools[i].gpool);
	kfree(pools);
out:
	kfree(data);

	return -ENOMEM;
}

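/* Nonzero when an IOMMU is actually present on the platform bus. */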
int msm_use_iommu(void)
{
	return iommu_present(&platform_bus_type);
}

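/*
 * Boot-time probe: build an iova layout for each domain described in
 * the platform data and register it, then attach each named IOMMU
 * context to its domain.
 */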
static int __init iommu_domain_probe(struct platform_device *pdev)
{
	struct iommu_domains_pdata *p = pdev->dev.platform_data;
	int i, j;

	if (!p)
		return -ENODEV;

	for (i = 0; i < p->ndomains; i++) {
		struct msm_iova_layout l = {0};
		struct msm_iova_partition *part;
		struct msm_iommu_domain *domains;

		domains = p->domains;
		l.npartitions = domains[i].npools;
		part = kmalloc(
			sizeof(struct msm_iova_partition) * l.npartitions,
			GFP_KERNEL);

		if (!part) {
			pr_err("%s: could not allocate space for domain %d\n",
				__func__, i);
			continue;
		}

		for (j = 0; j < l.npartitions; j++) {
			part[j].start = p->domains[i].iova_pools[j].paddr;
			part[j].size = p->domains[i].iova_pools[j].size;
		}

		l.partitions = part;

		msm_register_domain(&l);

		kfree(part);
	}

	for (i = 0; i < p->nnames; i++) {
		struct device *ctx = msm_iommu_get_ctx(
						p->domain_names[i].name);
		struct iommu_domain *domain;

		if (!ctx)
			continue;

		domain = msm_get_iommu_domain(p->domain_names[i].domain);

		if (!domain)
			continue;

		if (iommu_attach_device(domain, ctx)) {
			WARN(1, "%s: could not attach domain %p to context %s. iommu programming will not occur.\n",
				__func__, domain,
				p->domain_names[i].name);
			continue;
		}
	}

	return 0;
}

static struct platform_driver iommu_domain_driver = {
	.driver = {
		.name = "iommu_domains",
		.owner = THIS_MODULE
	},
};

static int __init msm_subsystem_iommu_init(void)
{
	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
}
device_initcall(msm_subsystem_iommu_init);