/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)	function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page (4KB) size is used.
 *	's':	multiple iommu superpage (16MB, 1MB, 64KB, 4KB) sizes are used.
 *
 *	'*':	not yet, but feasible.
 */
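
/*
 * For example (a hypothetical caller sketch, not part of this file):
 * pattern 2 above means "let the kernel allocate contiguous anonymous
 * memory and map it 1-1-1", which corresponds to iommu_kmalloc():
 *
 *	u32 da = iommu_kmalloc(obj, 0, SZ_64K, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	... 'da' is the device-visible address of the buffer ...
 *	iommu_kfree(obj, da);
 *
 * Passing da == 0 selects IOVMF_DA_ANON, so the device address is
 * chosen by the allocator.
 */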

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg_dma_len(sg);

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

/* return the largest iommu page size to which @addr is aligned, or 0 */
static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
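
/*
 * E.g. max_alignment(0x40210000) returns SZ_64K (the address is 64KB-
 * but not 1MB-aligned), max_alignment(0x1000) returns SZ_4K, and an
 * address that is not even 4KB-aligned yields 0.
 */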

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;

		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
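
/*
 * E.g. for bytes = SZ_1M + SZ_64K with da = 0x40100000 and
 * pa = 0x80200000, the first iteration covers 1MB (da | pa is
 * 1MB-aligned) and the second covers 64KB, so this returns 2 entries
 * instead of the 272 4KB entries a naive per-page split would need.
 */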

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of 'superblock') */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area - find iovma which includes @da
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * Find a hole (free gap) in the list of iovmas which fits the requested
 * address and length, and return a newly allocated iovma for it.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (flags & IOVMF_DA_ANON) {
		/*
		 * Reserve the first page for NULL
		 */
		start = PAGE_SIZE;
		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (start + bytes <= tmp->da_start)
			goto found;

		if (flags & IOVMF_DA_ANON)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (ULONG_MAX - start + 1 >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
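
/*
 * E.g. with IOVMF_DA_ANON and existing iovmas [0x1000-0x5000) and
 * [0x9000-0xa000), a 0x2000-byte request is bumped past the first
 * area to 0x6000; 0x6000 + 0x2000 <= 0x9000, so the new iovma
 * becomes [0x6000-0x8000).
 */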

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Returns mpu virtual addr which corresponds to a given device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it just exists for
	 * consistency of code readability.
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
				 size_t len)
{
	unsigned int i;
	struct scatterlist *sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned bytes;

		bytes = max_alignment(da | pa);
		bytes = min_t(unsigned, bytes, iopgsz_max(len));

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		da += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all; it just exists for
	 * consistency of code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg_dma_len(sg);

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}
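
/*
 * E.g. a 1MB-long, 1MB-aligned sg element is stored as a single 1MB
 * 'superpage' entry (bytes_to_iopgsz() picks the matching IOVMF_PGSZ_*
 * bits), so the iommu needs one entry for it instead of 256 4KB ones.
 */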

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu *obj, u32 da,
			    const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
			       const struct sg_table *sgt, void *va,
			       size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap - (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	       u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
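
/*
 * A minimal caller sketch (hypothetical; 'obj' and 'sgt' are assumed to
 * come from the driver, with every sg element iommu page size aligned).
 * da == 0 selects IOVMF_DA_ANON; passing IOVMF_MMIO also creates the
 * mpu-side vmap:
 *
 *	u32 da = iommu_vmap(obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	... the device sees the buffer at 'da' ...
 *	sgt = iommu_vunmap(obj, da);
 */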

/**
 * iommu_vunmap - release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
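
/*
 * A minimal caller sketch (hypothetical): back a 1MB device window with
 * discontiguous vmalloc'ed pages, mapped 1-n-1; da == 0 lets the
 * allocator pick the device address:
 *
 *	u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	... use da_to_va(obj, da) for mpu-side access ...
 *	iommu_vfree(obj, da);
 */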

/**
 * iommu_vfree - release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags, da, pa);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, da, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap - (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @pa:	contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates 1-1-1 mapping and returns @da again, which can be
 * adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	       u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
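
/*
 * A minimal caller sketch (hypothetical; 'fb_pa' stands for some
 * physically contiguous region such as a framebuffer, and MY_DA is a
 * made-up fixed device address, which selects IOVMF_DA_FIXED):
 *
 *	u32 da = iommu_kmap(obj, MY_DA, fb_pa, SZ_2M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kunmap(obj, da);
 */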

/**
 * iommu_kunmap - release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)__iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc - (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_ANON' is set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags &= IOVMF_HW_MASK;
	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;
	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);

/**
 * iommu_kfree - release virtual mapping obtained by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");