/*
 * Dynamic DMA mapping support.
 *
 * On cris there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 *
 * Borrowed from i386.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <asm/io.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	int order = get_order(size);
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, order);

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	if (!dma_release_from_coherent(dev, order, vaddr))
		free_pages((unsigned long)vaddr, order);
}
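
/*
 * Illustrative usage sketch, not part of the original file: how a
 * driver might call the helpers above.  The function names, the
 * one-page buffer size and the caller-supplied struct device are
 * assumptions made for this example only.
 */
static void *example_alloc_dma_buf(struct device *dev, dma_addr_t *bus_addr)
{
	/*
	 * On cris this boils down to __get_free_pages() plus
	 * virt_to_phys() in dma_alloc_coherent() above: the buffer is
	 * zeroed and *bus_addr receives its physical address.
	 */
	return dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);
}

static void example_free_dma_buf(struct device *dev, void *buf,
				 dma_addr_t bus_addr)
{
	/* The size must match the allocation so the same page order is freed. */
	dma_free_coherent(dev, PAGE_SIZE, buf, bus_addr);
}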