/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attributes
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported only with physically linear pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))

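/*
 * Illustrative note (not from the original source): a typical flags word
 * mixes both halves.  A client asking omap_iommu_vmalloc() below for a
 * big-endian buffer of 32-bit elements could pass
 *
 *	flags = IOVMF_ENDIAN_BIG | IOVMF_ELSZ_32;
 *
 * and the s/w bits IOVMF_ALLOC | IOVMF_DISCONT are then OR'ed in
 * internally before the area is mapped.
 */
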
static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in an sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal "
			       "entries\n", __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };

	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

Hiroshi DOYU69d3a842009-01-28 21:32:08 +0200126/*
127 * calculate the optimal number sg elements from total bytes based on
128 * iommu superpages
129 */
Guzman Lugo, Fernandoad108122010-12-15 00:54:01 +0000130static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
Hiroshi DOYU69d3a842009-01-28 21:32:08 +0200131{
Guzman Lugo, Fernandoad108122010-12-15 00:54:01 +0000132 unsigned nr_entries = 0, ent_sz;
Hiroshi DOYU69d3a842009-01-28 21:32:08 +0200133
134 if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
135 pr_err("%s: wrong size %08x\n", __func__, bytes);
136 return 0;
137 }
138
Guzman Lugo, Fernandoad108122010-12-15 00:54:01 +0000139 while (bytes) {
140 ent_sz = max_alignment(da | pa);
141 ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
142 nr_entries++;
143 da += ent_sz;
144 pa += ent_sz;
145 bytes -= ent_sz;
Hiroshi DOYU69d3a842009-01-28 21:32:08 +0200146 }
Hiroshi DOYU69d3a842009-01-28 21:32:08 +0200147
148 return nr_entries;
149}
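
/*
 * Worked example (illustration only, not part of the original file):
 * sgtable_nents(SZ_1M + SZ_4K, 0x00100000, 0x80100000) iterates as
 *
 *	pass 1: max_alignment(da | pa) = SZ_1M,
 *		iopgsz_max(SZ_1M + SZ_4K) = SZ_1M  -> ent_sz = SZ_1M
 *	pass 2: max_alignment(da | pa) = SZ_1M,
 *		iopgsz_max(SZ_4K) = SZ_4K          -> ent_sz = SZ_4K
 *
 * i.e. a physically contiguous 1M + 4K region is described by two sg
 * entries: one 1MB "superpage" plus one 4KB page.
 */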

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area - find iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in the iovma mmap list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device
 * virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, it just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0) {
			err = -EINVAL;
			goto err_out;
		}

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
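
/*
 * Usage sketch (illustration only, not part of this driver): a client
 * that has attached 'dev' to an iommu_domain could map a pre-built,
 * io-page-aligned sg_table roughly like this; 'domain', 'dev' and 'sgt'
 * are assumed to be set up by the caller:
 *
 *	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = omap_iommu_vunmap(domain, dev, da);
 *
 * Passing da == 0 without IOVMF_DA_FIXED lets alloc_iovm_area() choose
 * the device address; h/w attribute bits such as IOVMF_ENDIAN_BIG may be
 * OR'ed into the flags argument.
 */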

/**
 * omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @domain: iommu domain
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
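
/*
 * Usage sketch (illustration only, not part of this driver): pairing
 * omap_iommu_vmalloc() with omap_iommu_vfree() in a hypothetical client,
 * where 'domain' and 'dev' are assumed to be set up by the caller:
 *
 *	u32 da = omap_iommu_vmalloc(domain, dev, 0, SZ_1M, 0);
 *
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *
 *	... the device uses 'da', the CPU can use omap_da_to_va(dev, da) ...
 *
 *	omap_iommu_vfree(domain, dev, da);
 */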

/**
 * omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");