/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *  Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
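
/*
 * Illustration (editor's sketch, not part of the original file): each free
 * block stores, in its first int, the offset of the next free block within
 * the same page.  With size = 64 the chain in a fresh page begins
 *
 *	page->offset = 0
 *	*(int *)(vaddr +  0) =  64
 *	*(int *)(vaddr + 64) = 128
 *	...
 *
 * and terminates at an offset >= allocation.  dma_pool_alloc() pops the
 * chain head; dma_pool_free() pushes the block back on.
 */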

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
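
/*
 * Sample "pools" sysfs attribute output, following the format string above
 * (hypothetical pool names and values, shown for illustration only):
 *
 *	poolinfo - 0.1
 *	buffer-512         12   24  512  3
 *	buffer-2048         3    4 2048  2
 *
 * Columns: pool name, blocks in use, total blocks, block size, pages.
 */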

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
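
/*
 * Usage sketch (editor's example, not part of the original file; "dev" is
 * a hypothetical struct device pointer):
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("descriptors", dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Every block returned by dma_pool_alloc() on this pool is then 64 bytes,
 * 16-byte aligned, and never crosses a 4 KiB boundary.
 */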

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
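
/*
 * Worked example (editor's illustration): size = 96, boundary = 256,
 * allocation = 4096.  The loop above chains offsets 0 -> 96 -> 256 ->
 * 352 -> 512 -> ...  At offset 96 the candidate next block (192..288)
 * would reach past next_boundary (256), so the chain jumps straight to
 * 256 and bytes 192..255 stay unused.  No block ever crosses a 256-byte
 * boundary.
 */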

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
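
/*
 * Usage sketch (editor's example, not part of the original file): the
 * device is given 'dma', the CPU uses 'cpu', and the pair is returned
 * together to dma_pool_free():
 *
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_pool_free(pool, cpu, dma);
 *
 * With __GFP_WAIT set (as in GFP_KERNEL), dma_pool_alloc() sleeps on
 * pool->waitq and retries when no block is free; callers without
 * __GFP_WAIT get NULL when a new page can't be allocated.
 */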

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
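
/*
 * Usage sketch (editor's example; the probe function and "pdev" are
 * hypothetical): the managed variant needs no dma_pool_destroy() on the
 * error or detach paths, as devres tears the pool down automatically:
 *
 *	static int mydev_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("mydev", &pdev->dev, 64, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 */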

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);