/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
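
/*
 * Usage sketch (illustration only; the device and pool sizes below are
 * hypothetical, not taken from this file): a platform whose bus can only
 * reach a limited DMA window registers each affected device at setup time
 * and tears it down again before the device goes away:
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 *	...
 *	dmabounce_unregister_dev(dev);
 *
 * The two sizes select the "small" and "large" dma_pool objects used for
 * bounce buffers; anything bigger falls back to dma_alloc_coherent().
 */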

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void *ptr;
	size_t size;
	int direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);

	list_add(&buf->node, &device_info->safe_buffers);

	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		if (dev)
			dev_err(dev, "Trying to %s invalid mapping\n", where);
		else
			pr_err("unknown device: Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == 0) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
		/*
		 * We don't need to sync the DMA buffer since
		 * it was allocated via the coherent allocators.
		 */
		dma_cache_maint(ptr, size, dir);
	}

	return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM. Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			dmac_clean_range(ptr, ptr + size);
			outer_clean_range(__pa(ptr), __pa(ptr) + size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	}
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is,
 * allocate a 'safe' buffer, copy the unsafe buffer into it and
 * substitute the safe buffer for the unsafe one (basically, move
 * the buffer from an unsafe area to a safe one).
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
}

dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(dir == DMA_NONE);

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);

/*
 * See if a mapped address was really a "safe" buffer and, if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically, return things to the way they
 * should be).
 */

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}
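
/*
 * Driver-side view (a hypothetical sketch; "mydev", "buf" and "len" are
 * illustrations, not part of this file): a driver on a dmabounce-registered
 * device uses the ordinary streaming DMA API and never sees the bounce
 * buffer itself:
 *
 *	dma_addr_t handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	... program the device with "handle" and wait for completion ...
 *	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 *
 * If "buf" fell outside the device's DMA window, "handle" refers to a safe
 * buffer, and for DMA_FROM_DEVICE mappings the data is copied back in
 * dma_unmap_single() above.
 */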

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);
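
/*
 * Both helpers return non-zero when the mapping was never bounced, so the
 * architecture's dma_sync_single_range_for_{cpu,device}() wrappers are
 * expected to fall back to ordinary cache maintenance in that case.  From
 * a driver this is just the usual partial-sync idiom (a hypothetical
 * sketch; "mydev", "handle", "off" and "len" are not part of this file):
 *
 *	dma_sync_single_range_for_cpu(mydev, handle, off, len, DMA_FROM_DEVICE);
 *	... inspect the freshly DMA'd bytes ...
 *	dma_sync_single_range_for_device(mydev, handle, off, len, DMA_FROM_DEVICE);
 */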

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		       unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
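
/*
 * In addition to registering its devices, the platform provides the global
 * dma_needs_bounce() hook that map_single() consults for addresses which
 * pass the DMA-mask test.  A hypothetical implementation for a bus that can
 * only reach the first 64MB of RAM might look like (illustration only, not
 * part of this file):
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > 0x04000000;
 *	}
 */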

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");