#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

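/*
 * Every wrapper below dispatches through the struct dma_map_ops that
 * platform_dma_get_ops() selects for the device, so the generic,
 * swiotlb, and IOMMU implementations all sit behind one interface.
 */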
extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

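/*
 * Coherent allocations unconditionally OR in GFP_DMA so that the
 * returned memory falls inside the platform's DMA-addressable zone.
 */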
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

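/*
 * Single mappings are expressed in terms of ->map_page(): the virtual
 * address is split into its page and the offset within that page.
 */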
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

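/*
 * The sync hooks mark ownership transfers between CPU and device;
 * implementations that bounce-buffer (e.g. swiotlb) copy data here.
 */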
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

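/*
 * ->map_page()/->map_sg() report failure through the returned cookie;
 * callers must check it with dma_mapping_error() before using it.
 */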
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

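/*
 * Typical streaming-DMA usage of this API (a sketch only; see
 * Documentation/DMA-API.txt for the authoritative rules):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with 'handle', wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */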
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * The rest of this file is part of the "Advanced DMA API".  Use at your
 * own risk.  See Documentation/DMA-API.txt for details.
 */

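/*
 * Note that @offset is ignored by the range variants below: the sync
 * starts at @dma_handle and covers @size bytes.
 */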
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

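/*
 * Mask negotiation is delegated to the platform ops as well:
 * dma_set_mask() only commits a new mask after dma_supported()
 * has approved it.
 */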
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

extern int dma_get_cache_alignment(void);

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */