/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
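
/*
 * A minimal usage sketch (hedged: the pool name, device and sizes below
 * are illustrative assumptions, not taken from this file).  A driver
 * typically creates one pool per block size it needs, then allocates
 * and frees blocks from it:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	pool = dma_pool_create("mydev_buf", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... hand 'dma' to the device, use 'cpu_addr' from the CPU ...
 *	dma_pool_free(pool, cpu_addr, dma);
 *	dma_pool_destroy(pool);
 */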

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);
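
/*
 * A hedged sample of what reading the resulting "pools" sysfs attribute
 * might print, given the header and per-pool format string above (pool
 * names and counts are invented for illustration; exact column widths
 * follow "%-16s %4u %4zu %4zu %2u"):
 *
 *	poolinfo - 0.1
 *	buffer-2048         3    4 2048  2
 *	buffer-512          7    8  512  1
 *
 * Columns: pool name, blocks in use, total blocks
 * (pages * allocation / size), block size, pages allocated.
 */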

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
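
/*
 * For instance (a sketch; the name and sizes are assumptions): a pool of
 * 512-byte descriptors, 32-byte aligned, none of which may cross a 4 KiB
 * boundary, as some DMA engines require:
 *
 *	struct dma_pool *p = dma_pool_create("mydev_desc", dev, 512, 32, 4096);
 */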

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
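
/*
 * A worked example with illustrative numbers: with size = 96,
 * allocation = 4096 and boundary = 1024, the chain built above runs
 * 0 -> 96 -> ... -> 864 -> 1024 -> 1120 -> ...  The slot that would
 * begin at 960 is skipped, since a 96-byte block there would cross the
 * 1024-byte boundary, so the free list jumps straight to the next
 * boundary instead.
 */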

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
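
/*
 * Note that dma_pool_zalloc() in <linux/dmapool.h> wraps this function,
 * passing __GFP_ZERO so that the branch above zeroes the block.  A
 * minimal caller sketch (the pool is assumed to exist already):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_pool_zalloc(pool, GFP_ATOMIC, &dma);
 */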

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
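
/*
 * A probe-time sketch, assuming a platform driver (the driver and pool
 * names are illustrative): the devres release registered above destroys
 * the pool automatically on driver detach, so error and remove paths
 * need no explicit dma_pool_destroy():
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("mydrv_buf", &pdev->dev, 256, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... use the pool ...
 *		return 0;
 *	}
 */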

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);