#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
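
/*
 * A minimal usage sketch, for orientation only.  The device pointer,
 * pool name, and sizes below are hypothetical; a real driver would use
 * its own device and block geometry:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create ("mydesc", &pdev->dev, 64, 64, 0);
 *	if (pool) {
 *		cpu_addr = dma_pool_alloc (pool, SLAB_KERNEL, &dma);
 *		if (cpu_addr)
 *			dma_pool_free (pool, cpu_addr, dma);
 *		dma_pool_destroy (pool);
 *	}
 */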

struct dma_pool {	/* the pool */
	struct list_head	page_list;	/* pages backing this pool */
	spinlock_t		lock;		/* guards page_list and the bitmaps */
	size_t			blocks_per_page;	/* blocks per 'allocation' */
	size_t			size;		/* block size, after alignment */
	struct device		*dev;		/* device doing the DMA, or NULL */
	size_t			allocation;	/* bytes per dma_alloc_coherent() */
	char			name [32];	/* pool name, for diagnostics */
	wait_queue_head_t	waitq;		/* sleepers in dma_pool_alloc() */
	struct list_head	pools;		/* link in dev->dma_pools */
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;	/* link in pool->page_list */
	void			*vaddr;		/* kernel address of the dma memory */
	dma_addr_t		dma;		/* bus address of the dma memory */
	unsigned		in_use;		/* blocks currently allocated */
	unsigned long		bitmap [0];	/* bit set == block is free */
};
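
/*
 * A worked example of the header sizing, assuming BITS_PER_LONG == 32
 * (the numbers are illustrative, not from any particular device):
 * with allocation == 4096 and size == 32, blocks_per_page is 128, so
 * the bitmap needs 128 / 32 == 4 longs, and the whole header occupies
 * sizeof (struct dma_page) + 4 * sizeof (long) bytes of kmalloc memory.
 */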

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_FREED	0xa7	/* !inuse */
#define	POOL_POISON_ALLOCATED	0xa9	/* !initted */

static DECLARE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	down (&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}
	up (&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/* round up to the next multiple of align */
		size += align - 1;
		size &= ~(align - 1);
	}
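
	/*
	 * Worked example of the round-up above (numbers illustrative):
	 * with size == 7 and align == 4, 7 + 3 == 10 and 10 & ~3 == 8,
	 * the next multiple of the alignment.
	 */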

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		down (&pools_lock);
		if (list_empty (&dev->dma_pools))
			device_create_file (dev, &dev_attr_pools);
		/* note:  not currently insisting "name" be unique */
		list_add (&retval->pools, &dev->dma_pools);
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
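
/*
 * For instance, a driver for hardware that forbids a transfer from
 * crossing a 4 KByte boundary might create its pool roughly like this
 * (the pool name and device below are hypothetical):
 *
 *	pool = dma_pool_create ("xfer_buf", &pdev->dev, 1024, 0, 4096);
 *
 * Every block returned by dma_pool_alloc() then fits inside a single
 * 4 KByte window.
 */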


static struct dma_page *
pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
{
	struct dma_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct dma_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					pool->allocation,
					&page->dma,
					mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}


/*
 * Any cleared bit means the corresponding block is allocated.  Pad bits
 * past blocks_per_page were set when the page was created, so a fully
 * free page compares as all-ones in every word.
 */
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
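
/*
 * For example (illustrative numbers): with blocks == 100 and
 * BITS_PER_LONG == 64, the loop inspects two words; the page counts as
 * busy as soon as either word has any zero bit, i.e. any block handed
 * out by dma_pool_alloc() and not yet returned.
 */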

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	down (&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	up (&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page		*page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct dma_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int		i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	/* can't sleep with the pool lock held, hence SLAB_ATOMIC */
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
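
/*
 * To illustrate the block-to-offset mapping above (numbers are made up):
 * with BITS_PER_LONG == 32, map == 2, block == 5, and pool->size == 64,
 * the chosen block index is 32 * 2 + 5 == 69, so the block starts at
 * byte offset 69 * 64 == 4416 into the page's coherent memory.
 */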


static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct dma_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}
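
/*
 * For example (addresses are made up): a page with page->dma == 0x10000
 * in a pool with allocation == 4096 matches any dma handle in the range
 * [0x10000, 0x10fff]; a handle outside every page's range yields NULL.
 */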


/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == NULL) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;
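
	/*
	 * Worked example of the split above (numbers are illustrative):
	 * with dma - page->dma == 2144 and pool->size == 32 on a machine
	 * where BITS_PER_LONG == 64, the block index is 2144 / 32 == 67,
	 * which lands in bitmap word 67 / 64 == 1 at bit 67 % 64 == 3.
	 */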

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}


EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);