/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

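/*
 * Illustrative sketch, not part of the original file: platform setup code
 * could record a bus offset for a device whose view of RAM does not start at
 * CPU physical address 0. The archdata.dma_data field is the one read by
 * get_dma_direct_offset() below; the helper name, the device pointer and the
 * 0x30000000 offset are hypothetical.
 *
 *	static void __init example_set_dma_offset(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x30000000UL;
 *	}
 */
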
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}

#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
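
/*
 * Hedged usage sketch, not part of the original file: a driver reaches the
 * two callbacks above through the generic DMA API rather than calling them
 * directly. dma_alloc_coherent() and dma_free_coherent() are standard kernel
 * API; the example functions, device pointer and buffer size are
 * hypothetical.
 *
 *	static void *example_get_buffer(struct device *dev, dma_addr_t *handle)
 *	{
 *		return dma_alloc_coherent(dev, PAGE_SIZE, handle, GFP_KERNEL);
 *	}
 *
 *	static void example_put_buffer(struct device *dev, void *vaddr,
 *				       dma_addr_t handle)
 *	{
 *		dma_free_coherent(dev, PAGE_SIZE, vaddr, handle);
 *	}
 */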

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
						sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
/*
 * There is no need for any further cache cleanup here.
 *
 * dma_address is a physical address, so it is passed to __dma_sync()
 * directly; phys_to_virt() would only be needed if __dma_sync_page()
 * (which does __virt_to_phys() internally) were still being used.
 */
	__dma_sync(dma_address, size, direction);
}
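
/*
 * Hedged usage sketch, not part of the original file: the map_page/unmap_page
 * callbacks above back the generic streaming-DMA API. dma_map_single(),
 * dma_mapping_error() and dma_unmap_single() are standard kernel API; the
 * example function, device pointer and buffer are hypothetical.
 *
 *	static int example_tx(struct device *dev, void *buf, size_t len)
 *	{
 *		dma_addr_t handle;
 *
 *		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, handle))
 *			return -ENOMEM;
 *		... start the transfer, then, once it has completed ...
 *		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *		return 0;
 *	}
 */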

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
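
/*
 * Hedged usage sketch, not part of the original file: the directional checks
 * above mirror how a driver peeks at a live streaming buffer.
 * dma_sync_single_for_cpu() and dma_sync_single_for_device() are standard
 * kernel API; the example function, device pointer and the
 * example_descriptor_complete() helper are hypothetical.
 *
 *	static bool example_rx_ready(struct device *dev, dma_addr_t handle,
 *				     void *buf, size_t len)
 *	{
 *		bool done;
 *
 *		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *		done = example_descriptor_complete(buf);
 *		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *		return done;
 *	}
 */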

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent = dma_direct_alloc_coherent,
	.free_coherent = dma_direct_free_coherent,
	.map_sg = dma_direct_map_sg,
	.unmap_sg = dma_direct_unmap_sg,
	.dma_supported = dma_direct_dma_supported,
	.map_page = dma_direct_map_page,
	.unmap_page = dma_direct_unmap_page,
	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
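
/*
 * Hedged note, not part of the original file: drivers do not call the
 * dma_direct_* functions above directly. The generic DMA API
 * (dma_map_single(), dma_alloc_coherent(), ...) looks the operations up via
 * the architecture's get_dma_ops() helper, which on this platform is assumed
 * to fall back to &dma_direct_ops when a device has no private dma_ops set
 * in its archdata.
 */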

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);