/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                       dma_addr_t dma_handle);
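
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver would typically allocate a coherent buffer once at probe time
 * and free it on removal; "buf" and "buf_dma" are placeholder names.
 *
 *      void *buf;
 *      dma_addr_t buf_dma;
 *
 *      buf = dma_alloc_coherent(dev, PAGE_SIZE, &buf_dma, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      // hand buf_dma to the device, access buf from the CPU
 *      dma_free_coherent(dev, PAGE_SIZE, buf, buf_dma);
 */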

/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)             (1)
#define dma_get_cache_alignment()       (32)
#define dma_is_consistent(d, h)         (1)

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (!__builtin_constant_p(dir)) {
                __dma_sync(addr, size, dir);
                return;
        }

        switch (dir) {
        case DMA_NONE:
                BUG();
        case DMA_TO_DEVICE:             /* writeback only */
                flush_dcache_range(addr, addr + size);
                break;
        case DMA_FROM_DEVICE:           /* invalidate only */
        case DMA_BIDIRECTIONAL:         /* flush and invalidate */
                /* Blackfin has no dedicated invalidate (it includes a flush) */
                invalidate_dcache_range(addr, addr + size);
                break;
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either dma_unmap_single() or dma_sync_single_for_cpu() is performed.
 */
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        _dma_sync((dma_addr_t)ptr, size, dir);
        return (dma_addr_t) ptr;
}
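
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * driver transmitting a driver-owned buffer maps it, starts the transfer,
 * and unmaps it once the device is done.  "start_device_tx" stands in for
 * whatever kicks off the hardware.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      start_device_tx(handle, len);           // hypothetical device kick
 *      // ... wait for the completion interrupt ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */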

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction dir)
{
        return dma_map_single(dev, page_address(page) + offset, size, dir);
}

Bryan Wu1394f032007-05-06 14:50:22 -070091/*
92 * Unmap a single streaming mode DMA translation. The dma_addr and size
93 * must match what was provided for in a previous pci_map_single call. All
94 * other usages are undefined.
95 *
96 * After this call, reads by the cpu to the buffer are guarenteed to see
97 * whatever the device wrote there.
98 */
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
               enum dma_data_direction dir)
{
        dma_unmap_single(dev, dma_addr, size, dir);
}

/*
 * Map a set of buffers described by a scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * dma_map_single() interface above.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single() are
 * the same here.
 */
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                      enum dma_data_direction dir);
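
/*
 * Usage sketch (illustrative only): mapping a pre-built scatterlist and
 * walking the entries the mapping actually produced.  "sg" and "nents"
 * are assumed to have been prepared with sg_init_table()/sg_set_buf(),
 * and "program_device_descriptor" is a hypothetical driver helper.
 *
 *      int i, count;
 *      struct scatterlist *s;
 *
 *      count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *      for_each_sg(sg, s, count, i)
 *              program_device_descriptor(sg_dma_address(s), sg_dma_len(s));
 *      // ... run the transfer, then ...
 *      dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */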

/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
             int nhwentries, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        _dma_sync(handle + offset, size, dir);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
                        enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
                           enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
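
/*
 * Usage sketch (illustrative only): reusing one streaming mapping for a
 * receive buffer across several transfers.  On this port the *_for_cpu
 * variants are no-ops and the *_for_device variants do the cache
 * maintenance, but portable code should issue both.  "start_device_rx"
 * and "process" are hypothetical driver helpers.
 *
 *      handle = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
 *      while (more_to_receive) {
 *              dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *              start_device_rx(handle, len);
 *              // ... wait for completion ...
 *              dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *              process(rx_buf, len);           // CPU reads fresh data
 *      }
 *      dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */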

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nents, enum dma_data_direction dir);

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        _dma_sync((dma_addr_t)vaddr, size, dir);
}

#endif                          /* _BLACKFIN_DMA_MAPPING_H */