/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include <plat/iopgtable.h>

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
        unsigned int i, total = 0;
        struct scatterlist *sg;

        if (!sgt)
                return 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;

                bytes = sg->length;

                if (!iopgsz_ok(bytes)) {
                        pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
                               __func__, i, bytes);
                        return 0;
                }

                total += bytes;
        }

        return total;
}
#define sgtable_ok(x)   (!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
        int i;
        unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

        for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
                ;
        return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
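
/*
 * Worked example (illustrative only, not used by the driver): for
 * addr = 0x40100000 the loop skips SZ_16M (0x40100000 & (SZ_16M - 1)
 * is 0x100000, non-zero) and stops at SZ_1M, so SZ_1M is returned.
 * A fully unaligned address such as 0x40100004 walks off the end of
 * the array and yields 0.
 */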

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
        unsigned nr_entries = 0, ent_sz;

        if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
                pr_err("%s: wrong size %08x\n", __func__, bytes);
                return 0;
        }

        while (bytes) {
                ent_sz = max_alignment(da | pa);
                ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
                nr_entries++;
                da += ent_sz;
                pa += ent_sz;
                bytes -= ent_sz;
        }

        return nr_entries;
}
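
/*
 * Worked example (illustrative only): with da = pa = 0 and
 * bytes = SZ_16M + SZ_1M, the first iteration consumes a 16MB
 * superpage and the second a 1MB section, so sgtable_nents()
 * returns 2 rather than the 4352 entries a 4KB-only walk would need.
 */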

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
                                      u32 da, u32 pa)
{
        unsigned int nr_entries;
        int err;
        struct sg_table *sgt;

        if (!bytes)
                return ERR_PTR(-EINVAL);

        if (!IS_ALIGNED(bytes, PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (flags & IOVMF_LINEAR) {
                nr_entries = sgtable_nents(bytes, da, pa);
                if (!nr_entries)
                        return ERR_PTR(-EINVAL);
        } else
                nr_entries = bytes / PAGE_SIZE;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
        if (err) {
                kfree(sgt);
                return ERR_PTR(err);
        }

        pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

        return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
        if (!sgt)
                return;

        sg_free_table(sgt);
        kfree(sgt);

        pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
        u32 va;
        size_t total;
        unsigned int i;
        struct scatterlist *sg;
        struct vm_struct *new;
        const struct mem_type *mtype;

        mtype = get_mem_type(MT_DEVICE);
        if (!mtype)
                return ERR_PTR(-EINVAL);

        total = sgtable_len(sgt);
        if (!total)
                return ERR_PTR(-EINVAL);

        new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
        if (!new)
                return ERR_PTR(-ENOMEM);
        va = (u32)new->addr;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                u32 pa;
                int err;

                pa = sg_phys(sg);
                bytes = sg->length;

                BUG_ON(bytes != PAGE_SIZE);

                err = ioremap_page(va, pa, mtype);
                if (err)
                        goto err_out;

                va += bytes;
        }

        flush_cache_vmap((unsigned long)new->addr,
                         (unsigned long)(new->addr + total));
        return new->addr;

err_out:
        WARN_ON(1); /* FIXME: cleanup some mpu mappings */
        vunmap(new->addr);
        return ERR_PTR(-EAGAIN);
}
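
/*
 * Usage sketch for vmap_sg() (hypothetical caller; 'sgt' is assumed to
 * hold page-sized entries built elsewhere). The returned address is an
 * MT_DEVICE kernel mapping and must be released with vunmap_sg():
 *
 *      void *va = vmap_sg(sgt);
 *      if (IS_ERR(va))
 *              return PTR_ERR(va);
 *      ...
 *      vunmap_sg(va);
 */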

static inline void vunmap_sg(const void *va)
{
        vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
                                            const u32 da)
{
        struct iovm_struct *tmp;

        list_for_each_entry(tmp, &obj->mmap, list) {
                if ((da >= tmp->da_start) && (da < tmp->da_end)) {
                        size_t len;

                        len = tmp->da_end - tmp->da_start;

                        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
                                __func__, tmp->da_start, da, tmp->da_end, len,
                                tmp->flags);

                        return tmp;
                }
        }

        return NULL;
}

/**
 * omap_find_iovm_area - find the iovma which includes @da
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
{
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);
        area = __find_iovm_area(obj, da);
        mutex_unlock(&obj->mmap_lock);

        return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);
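
/*
 * Usage sketch (hypothetical caller; 'obj' is an attached omap_iommu):
 *
 *      struct iovm_struct *area = omap_find_iovm_area(obj, da);
 *      if (area)
 *              dev_info(obj->dev, "da %08x lives in %08x-%08x\n",
 *                       da, area->da_start, area->da_end);
 */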

/*
 * This finds the hole (area) which fits the requested address and len
 * in iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
                                           size_t bytes, u32 flags)
{
        struct iovm_struct *new, *tmp;
        u32 start, prev_end, alignment;

        if (!obj || !bytes)
                return ERR_PTR(-EINVAL);

        start = da;
        alignment = PAGE_SIZE;

        if (~flags & IOVMF_DA_FIXED) {
                /* Don't map address 0 */
                start = obj->da_start ? obj->da_start : alignment;

                if (flags & IOVMF_LINEAR)
                        alignment = iopgsz_max(bytes);
                start = roundup(start, alignment);
        } else if (start < obj->da_start || start > obj->da_end ||
                   obj->da_end - start < bytes) {
                return ERR_PTR(-EINVAL);
        }

        tmp = NULL;
        if (list_empty(&obj->mmap))
                goto found;

        prev_end = 0;
        list_for_each_entry(tmp, &obj->mmap, list) {

                if (prev_end > start)
                        break;

                if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
                        goto found;

                if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
                        start = roundup(tmp->da_end + 1, alignment);

                prev_end = tmp->da_end;
        }

        if ((start >= prev_end) && (obj->da_end - start >= bytes))
                goto found;

        dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
                __func__, da, bytes, flags);

        return ERR_PTR(-EINVAL);

found:
        new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
        if (!new)
                return ERR_PTR(-ENOMEM);

        new->iommu = obj;
        new->da_start = start;
        new->da_end = start + bytes;
        new->flags = flags;

        /*
         * keep ascending order of iovmas
         */
        if (tmp)
                list_add_tail(&new->list, &tmp->list);
        else
                list_add(&new->list, &obj->mmap);

        dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
                __func__, new->da_start, start, new->da_end, bytes, flags);

        return new;
}
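
/*
 * Illustration of the first-fit walk above (hypothetical numbers,
 * assuming 4KB pages): with iovmas at 0x1000-0x3000 and 0x5000-0x6000
 * and IOVMF_DA_FIXED unset, a PAGE_SIZE request is bumped past the
 * first area to roundup(0x3000 + 1, PAGE_SIZE) = 0x4000, fits in the
 * hole before 0x5000, and the new iovma is linked there, keeping
 * obj->mmap sorted by address.
 */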

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
        size_t bytes;

        BUG_ON(!obj || !area);

        bytes = area->da_end - area->da_start;

        dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
                __func__, area->da_start, area->da_end, bytes, area->flags);

        list_del(&area->list);
        kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given
 * device virtual addr
 */
void *omap_da_to_va(struct omap_iommu *obj, u32 da)
{
        void *va = NULL;
        struct iovm_struct *area;

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }
        va = area->va;
out:
        mutex_unlock(&obj->mmap_lock);

        return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);
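
/*
 * Usage sketch (hypothetical caller): translating a device address
 * previously mapped by this module back to an mpu pointer:
 *
 *      void *va = omap_da_to_va(obj, da);
 *      if (!va)
 *              return -EFAULT;
 */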

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
        unsigned int i;
        struct scatterlist *sg;
        void *va = _va;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *pg;
                const size_t bytes = PAGE_SIZE;

                /*
                 * iommu 'superpage' isn't supported with
                 * 'omap_iommu_vmalloc()'
                 */
                pg = vmalloc_to_page(va);
                BUG_ON(!pg);
                sg_set_page(sg, pg, bytes, 0);

                va += bytes;
        }
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
        /*
         * Actually this is not necessary at all; it just exists for
         * consistency and code readability.
         */
        BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
                         const struct sg_table *sgt, u32 flags)
{
        int err;
        unsigned int i, j;
        struct scatterlist *sg;
        u32 da = new->da_start;
        int order;

        if (!domain || !sgt)
                return -EINVAL;

        BUG_ON(!sgtable_ok(sgt));

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa;
                size_t bytes;

                pa = sg_phys(sg);
                bytes = sg->length;

                flags &= ~IOVMF_PGSZ_MASK;

                if (bytes_to_iopgsz(bytes) < 0)
                        goto err_out;

                order = get_order(bytes);

                pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
                         i, da, pa, bytes);

                err = iommu_map(domain, da, pa, order, flags);
                if (err)
                        goto err_out;

                da += bytes;
        }
        return 0;

err_out:
        da = new->da_start;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes;

                bytes = sg->length;
                order = get_order(bytes);

                /* ignore failures; we're already handling one */
                iommu_unmap(domain, da, order);

                da += bytes;
        }
        return err;
}
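
/*
 * Note on map_iovm_area() above: this kernel's iommu_map()/iommu_unmap()
 * API takes a page order rather than a byte size, so e.g. a 1MB section
 * is passed as get_order(SZ_1M) = 8 (256 4KB pages, assuming 4KB
 * PAGE_SIZE). On failure, the error path re-walks only the 'i' entries
 * already mapped and unmaps them in order.
 */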

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
                            struct iovm_struct *area)
{
        u32 start;
        size_t total = area->da_end - area->da_start;
        const struct sg_table *sgt = area->sgt;
        struct scatterlist *sg;
        int i, err;

        BUG_ON(!sgtable_ok(sgt));
        BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

        start = area->da_start;
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes;
                int order;

                bytes = sg->length;
                order = get_order(bytes);

                err = iommu_unmap(domain, start, order);
                if (err)
                        break;

                dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
                        __func__, start, bytes, area->flags);

                BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

                total -= bytes;
                start += bytes;
        }
        BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
                                      struct omap_iommu *obj, const u32 da,
                                      void (*fn)(const void *), u32 flags)
{
        struct sg_table *sgt = NULL;
        struct iovm_struct *area;

        if (!IS_ALIGNED(da, PAGE_SIZE)) {
                dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
                return NULL;
        }

        mutex_lock(&obj->mmap_lock);

        area = __find_iovm_area(obj, da);
        if (!area) {
                dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
                goto out;
        }

        if ((area->flags & flags) != flags) {
                dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
                        area->flags);
                goto out;
        }
        sgt = (struct sg_table *)area->sgt;

        unmap_iovm_area(domain, obj, area);

        fn(area->va);

        dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
                area->da_start, da, area->da_end,
                area->da_end - area->da_start, area->flags);

        free_iovm_area(obj, area);
out:
        mutex_unlock(&obj->mmap_lock);

        return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
                            u32 da, const struct sg_table *sgt, void *va,
                            size_t bytes, u32 flags)
{
        int err = -ENOMEM;
        struct iovm_struct *new;

        mutex_lock(&obj->mmap_lock);

        new = alloc_iovm_area(obj, da, bytes, flags);
        if (IS_ERR(new)) {
                err = PTR_ERR(new);
                goto err_alloc_iovma;
        }
        new->va = va;
        new->sgt = sgt;

        if (map_iovm_area(domain, new, sgt, new->flags))
                goto err_map;

        mutex_unlock(&obj->mmap_lock);

        dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
                __func__, new->da_start, bytes, new->flags, va);

        return new->da_start;

err_map:
        free_iovm_area(obj, new);
err_alloc_iovma:
        mutex_unlock(&obj->mmap_lock);
        return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
             u32 da, const struct sg_table *sgt,
             void *va, size_t bytes, u32 flags)
{
        return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @sgt:        address of scatter gather table
 * @flags:      iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                    const struct sg_table *sgt, u32 flags)
{
        size_t bytes;
        void *va = NULL;

        if (!obj || !obj->dev || !sgt)
                return -EINVAL;

        bytes = sgtable_len(sgt);
        if (!bytes)
                return -EINVAL;
        bytes = PAGE_ALIGN(bytes);

        if (flags & IOVMF_MMIO) {
                va = vmap_sg(sgt);
                if (IS_ERR(va))
                        return PTR_ERR(va);
        }

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_MMIO;

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                vunmap_sg(va);

        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
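
/*
 * Usage sketch (hypothetical caller; 'domain', 'obj' and 'sgt' are set
 * up elsewhere). Passing IOVMF_MMIO also creates a kernel (MT_DEVICE)
 * mapping via vmap_sg(); da 0 lets the allocator pick an address since
 * IOVMF_DA_FIXED is unset:
 *
 *      u32 da = omap_iommu_vmap(domain, obj, 0, sgt, IOVMF_MMIO);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 */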

/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
{
        struct sg_table *sgt;
        /*
         * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
         * Just returns 'sgt' to the caller to free.
         */
        sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
                            IOVMF_DISCONT | IOVMF_MMIO);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         contiguous iommu virtual memory
 * @bytes:      allocation size
 * @flags:      iovma and page property
 *
 * Allocates @bytes linearly and creates a 1-n-1 mapping, returning
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
                   size_t bytes, u32 flags)
{
        void *va;
        struct sg_table *sgt;

        if (!obj || !obj->dev || !bytes)
                return -EINVAL;

        bytes = PAGE_ALIGN(bytes);

        va = vmalloc(bytes);
        if (!va)
                return -ENOMEM;

        flags |= IOVMF_DISCONT;
        flags |= IOVMF_ALLOC;

        sgt = sgtable_alloc(bytes, flags, da, 0);
        if (IS_ERR(sgt)) {
                da = PTR_ERR(sgt);
                goto err_sgt_alloc;
        }
        sgtable_fill_vmalloc(sgt, va);

        da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
        if (IS_ERR_VALUE(da))
                goto err_iommu_vmap;

        return da;

err_iommu_vmap:
        sgtable_drain_vmalloc(sgt);
        sgtable_free(sgt);
err_sgt_alloc:
        vfree(va);
        return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
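
/*
 * Usage sketch (hypothetical caller): allocating and mapping 1MB of
 * kernel memory for a device, then tearing it down again:
 *
 *      u32 da = omap_iommu_vmalloc(domain, obj, 0, SZ_1M, 0);
 *      if (IS_ERR_VALUE(da))
 *              return (int)da;
 *      ...
 *      omap_iommu_vfree(domain, obj, da);
 */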

/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain:     iommu domain
 * @obj:        objective iommu
 * @da:         iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
                      const u32 da)
{
        struct sg_table *sgt;

        sgt = unmap_vm_area(domain, obj, da, vfree,
                            IOVMF_DISCONT | IOVMF_ALLOC);
        if (!sgt)
                dev_dbg(obj->dev, "%s: No sgt\n", __func__);
        sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
        const unsigned long flags = SLAB_HWCACHE_ALIGN;
        struct kmem_cache *p;

        p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
                              flags, NULL);
        if (!p)
                return -ENOMEM;
        iovm_area_cachep = p;

        return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
        kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");