/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map,unmap,sync}_* routines for systems that have
 *  limited DMA windows.  These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region.  This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA.  Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
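
/*
 * Typical use, as an illustrative sketch rather than code from this
 * file: the platform code that owns a device with a limited DMA window
 * registers it at probe time, sizing the two pools for its common
 * transfer sizes, and supplies dma_needs_bounce() to say which bus
 * addresses its hardware cannot reach.  The probe function, the pool
 * sizes and MYDEV_DMA_LIMIT below are all hypothetical.
 *
 *	static int mybus_probe(struct device *dev)
 *	{
 *		int ret = dmabounce_register_dev(dev, 512, 4096);
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > MYDEV_DMA_LIMIT;
 *	}
 */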

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);

	list_add(&buf->node, &device_info->safe_buffers);

	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
	   enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

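		/*
		 * limit is the size of the addressable window implied by
		 * the mask: for a contiguous low mask such as 0x00ffffff
		 * it evaluates to 0x01000000 (16MB), while for a full
		 * 32-bit mask the addition wraps to 0 and the size check
		 * below is skipped.  needs_bounce then ORs the addresses
		 * of the first and last bytes of the buffer; any bit set
		 * above the mask means part of the buffer lies outside
		 * the device's reach.
		 */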
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

		/*
		 * Figure out if we need to bounce from the DMA mask.
		 */
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			/* return the error cookie dma_mapping_error() checks */
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	}

	consistent_sync(ptr, size, dir);

	return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_mapping_error(dma_addr)) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			unsigned long ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);

			/*
			 * DMA buffers must have the same cache properties
			 * as if they were really used for DMA - which means
			 * data must be written back to RAM.  Note that
			 * we don't use dmac_flush_range() here for the
			 * bidirectional case because we know the cache
			 * lines will be coherent with the data written.
			 */
			ptr = (unsigned long)buf->ptr;
			dmac_clean_range(ptr, ptr + size);
		}
		free_safe_buffer(device_info, buf);
	}
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from original code need to be
		 * commented out b/c some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it. Good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors. No point
		 *    in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one dir at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * -ds
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	dma_addr = map_single(dev, ptr, size, dir);

	return dma_addr;
}
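
/*
 * From a driver's point of view the bouncing is transparent: the usual
 * streaming DMA sequence works unchanged.  A minimal sketch (the dev,
 * buf and len names here are hypothetical, not from this file):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... program the hardware with 'handle' and run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */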

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	unmap_single(dev, dma_addr, size, dir);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
			   enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
		    unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		       unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s\n",
			dev->bus_id);
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map,unmap,sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");