/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top, and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

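/*
 * DO_STATS() wraps the statistics accounting below so that it compiles
 * away entirely unless STATS is defined (it is #undef'd above by
 * default).
 */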
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void *ptr;
	size_t size;
	int direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

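/*
 * Per-device bookkeeping: two dma_pools for the common bounce-buffer
 * sizes (dma_alloc_coherent() is the fallback for anything larger),
 * plus an rwlock-protected list of the safe_buffers currently live
 * for this device.
 */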
struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

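/*
 * Allocation is tiered: requests that fit the device's "small" pool
 * come from there, larger ones from the "large" pool, and anything
 * bigger falls back to dma_alloc_coherent(). Everything below uses
 * GFP_ATOMIC, since mappings may be created from atomic context.
 */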
/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

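/*
 * In needs_bounce() below, "limit = (mask + 1) & ~mask" computes the
 * size of a power-of-two DMA window: with mask = 0x00ffffff, for
 * example, limit = 0x01000000 (16MB), so a single mapping larger than
 * 16MB can never fit. For a full 32-bit mask, mask + 1 wraps to 0,
 * limit is 0, and the size check is skipped.
 */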
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return dma_needs_bounce(dev, dma_addr, size) ? 1 : 0;
}

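/*
 * Mapping failures below are reported by returning ~0, which is
 * assumed to match the DMA_ERROR_CODE cookie that this vintage of
 * ARM's dma_mapping_error() tests for.
 */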
static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return ~0;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range. If it is,
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * Substitute the safe buffer for the unsafe one.
 * (Basically move the buffer from an unsafe area to a safe one.)
 */
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return ~0;

	if (ret == 0) {
		__dma_page_cpu_to_dev(page, offset, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);

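/*
 * __dma_map_page()/__dma_unmap_page() are assumed to be reached via
 * the inline dma_map_page()/dma_unmap_page() wrappers in this era's
 * arch/arm/include/asm/dma-mapping.h, so streaming mappings for a
 * dmabounce-registered device funnel through here.
 */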
/*
 * See if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (Basically return things back to the way they
 * should be.)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

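/*
 * Partial-sync helpers for the dma_sync_*() family. The return value
 * is a hint to the inline callers (assumed to live in the arch's
 * asm/dma-mapping.h): 1 means "not a bounced buffer, carry on with
 * normal cache maintenance", 0 means the sync was fully handled here
 * by copying between the safe and unsafe buffers.
 */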
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

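/*
 * Usage sketch (illustrative only, not part of this file): a machine
 * support file would typically register its DMA-limited devices at
 * init time, sizing the pools for its common transfer sizes. The
 * device and pool sizes below are hypothetical:
 *
 *	if (dmabounce_register_dev(&pdev->dev, 512, 4096))
 *		dev_err(&pdev->dev, "dmabounce registration failed\n");
 *
 * paired with dmabounce_unregister_dev() on teardown, and with a
 * platform-provided dma_needs_bounce() identifying the addresses
 * that must be bounced.
 */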
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");