/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top, and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void *ptr;
	size_t size;
	int direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

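/*
 * Per-device state, hung off dev->archdata.dmabounce: the list of
 * outstanding safe buffers, the two size-tiered pools they are carved
 * from, and the device-supplied needs_bounce() callback.
 */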
struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%zu)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

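/*
 * Resolve a DMA address back to its safe_buffer, if any. Returns NULL
 * for devices not registered with dmabounce, for invalid mappings, and
 * for addresses that were never bounced.
 */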
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

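/*
 * Decide whether a mapping must be bounced: first reject transfers
 * larger than the contiguous window implied by the DMA mask (the
 * (mask + 1) & ~mask trick yields that window size, e.g. a mask of
 * 0x00ffffff gives a 16MB limit), then bounce if any byte of the
 * mapping falls outside the mask, and finally defer to the
 * device-supplied needs_bounce() callback.
 */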
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#llx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

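/*
 * Bounce a CPU buffer into a safe DMA area: allocate a safe buffer and,
 * for DMA_TO_DEVICE/DMA_BIDIRECTIONAL transfers, copy the caller's data
 * into it before handing back the safe DMA address (or DMA_ERROR_CODE
 * when no safe buffer could be allocated).
 */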
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

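/*
 * Tear down a bounced mapping: for DMA_FROM_DEVICE/DMA_BIDIRECTIONAL
 * transfers, copy the device's data back to the original buffer (and
 * flush the D-cache so user mappings stay coherent), then release the
 * safe buffer.
 */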
static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range. If it is, allocate
 * a 'safe' buffer, copy the unsafe buffer into it, and substitute the
 * safe buffer for the unsafe one (basically move the buffer from an
 * unsafe area to a safe one).
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}

/*
 * See if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically return things back to the way they
 * should be).
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%zu,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}

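/*
 * Sync part of a bounced mapping for CPU access, copying the device's
 * data back from the safe buffer. Returns nonzero when the handle was
 * never bounced, in which case the wrapper below falls back to the
 * standard ARM cache maintenance.
 */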
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

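/*
 * Mirror image of the sync_for_cpu path: push the CPU's latest data
 * into the safe buffer before the device reads it. Again returns
 * nonzero for handles that were never bounced.
 */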
static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

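/*
 * Mask changes on a dmabounce device are accepted as no-ops, since
 * bouncing already hides the addressing restriction; other devices get
 * the standard ARM mask handling.
 */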
static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

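/*
 * Only the single-buffer map/unmap/sync entry points need bounce-aware
 * replacements; allocation, mmap and the scatter-gather walkers are
 * delegated to the standard arm_dma_* helpers (the sg walkers iterate
 * over ->map_page and so still pass through the bounce logic).
 */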
static struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= dmabounce_set_mask,
};

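/* Create one of the fixed-size dma_pools that back the safe buffers. */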
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

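/*
 * Hook a device into dmabounce and route its dma_map_ops through the
 * bounce-aware entry points above. A minimal, hypothetical sketch of a
 * caller (the callback and pool sizes are illustrative, not from this
 * file), assuming a bus that can only reach the first 64MB of RAM:
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return addr + size > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 512, 4096, my_needs_bounce);
 *
 * This would carve a pool of 512-byte safe buffers for small transfers
 * and a 4096-byte pool for larger ones; anything bigger than both falls
 * back to dma_alloc_coherent() in alloc_safe_buffer().
 */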
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

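/*
 * Detach a device from dmabounce and restore the standard dma_map_ops.
 * All mappings must have been unmapped first: unregistering with safe
 * buffers still outstanding is a BUG().
 */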
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");