/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

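/*
 * Illustrative sketch only (not part of this file): platform setup code
 * that needs a non-zero offset could record it roughly like
 *
 *	mydev->dev.archdata.dma_data = (void *)MY_BUS_DRAM_OFFSET;
 *
 * where mydev and MY_BUS_DRAM_OFFSET are hypothetical names. When no
 * offset has been recorded, get_dma_direct_offset() below falls back to
 * PCI_DRAM_OFFSET.
 */
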
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure if this is correct */
}

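/*
 * NOT_COHERENT_CACHE selects the allocator used for coherent buffers:
 * when it is defined, dma_direct_alloc_coherent() hands the request to
 * consistent_alloc(), which on non-coherent MicroBlaze is expected to
 * return an uncached mapping. The #else branch is a plain page-allocator
 * path; as the macro is defined unconditionally here, that branch is
 * currently compiled out.
 */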
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}

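/*
 * The release path must mirror whichever allocator was used above:
 * consistent_free() for consistent_alloc() buffers, free_pages() for the
 * plain page-allocator path.
 */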
static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

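/*
 * Streaming mapping of a scatterlist: each entry is translated to a
 * device address via get_dma_direct_offset(), and its cache lines are
 * flushed or invalidated by __dma_sync() as required by the transfer
 * direction.
 */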
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
						sg->length, direction);
	}

	return nents;
}

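/*
 * Nothing to undo for a direct mapping: dma_direct_map_sg() sets up no
 * IOMMU entries or bounce buffers, so unmapping is a no-op.
 */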
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
}

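/*
 * The direct implementation unconditionally reports that any DMA mask
 * is supported.
 */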
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

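/*
 * Map a single page for streaming DMA: synchronize the cache lines that
 * cover the buffer and return the corresponding device (bus) address.
 */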
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup beyond the sync below is necessary, and no
	 * phys_to_virt() conversion is needed: dma_address is already a
	 * physical address, which is what __dma_sync() expects.
	 */
	__dma_sync(dma_address, size, direction);
}

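/*
 * The sync_*_for_cpu callbacks run before the CPU reads a streaming
 * buffer the device may have written; the sync_*_for_device callbacks
 * run before the device reads a buffer the CPU has just filled. Both
 * delegate the cache maintenance to __dma_sync() and only act in the
 * direction where it can matter.
 */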
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU; it only has to be synchronized when
	 * the device may have written to it (DMA_FROM_DEVICE).
	 */

	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */

	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME: this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

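/*
 * The dma_map_ops table through which the generic DMA API dispatches to
 * the direct implementation above for devices using these ops.
 */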
struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

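/*
 * Register the preallocated entry pool with the DMA-API debugging
 * facility. fs_initcall() runs this during boot, before the bulk of the
 * device drivers are probed.
 */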
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);