#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

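/*
 * Table of DMA primitives supplied by the platform.  The machine-vector /
 * IOMMU setup code (see <asm/machvec.h> and <asm/swiotlb.h>) points the
 * global dma_ops below at a suitable implementation, and the generic
 * dma_* helpers in this file simply dispatch through it.
 */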
struct dma_mapping_ops {
	int (*mapping_error)(struct device *dev,
			     dma_addr_t dma_addr);
	void* (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *hwdev, unsigned long ptr,
				 size_t size, int direction);
	void (*unmap_single)(struct device *dev, dma_addr_t addr,
			     size_t size, int direction);
	dma_addr_t (*map_single_attrs)(struct device *dev, void *cpu_addr,
				       size_t size, int direction,
				       struct dma_attrs *attrs);
	void (*unmap_single_attrs)(struct device *dev,
				   dma_addr_t dma_addr,
				   size_t size, int direction,
				   struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *hwdev,
				    dma_addr_t dma_handle, size_t size,
				    int direction);
	void (*sync_single_for_device)(struct device *hwdev,
				       dma_addr_t dma_handle, size_t size,
				       int direction);
	void (*sync_single_range_for_cpu)(struct device *hwdev,
					  dma_addr_t dma_handle,
					  unsigned long offset, size_t size,
					  int direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
					     dma_addr_t dma_handle,
					     unsigned long offset, size_t size,
					     int direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void (*sync_sg_for_device)(struct device *hwdev,
				   struct scatterlist *sg, int nelems,
				   int direction);
	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
		      int nents, int direction);
	void (*unmap_sg)(struct device *hwdev,
			 struct scatterlist *sg, int nents,
			 int direction);
	int (*map_sg_attrs)(struct device *dev,
			    struct scatterlist *sg, int nents,
			    int direction, struct dma_attrs *attrs);
	void (*unmap_sg_attrs)(struct device *dev,
			       struct scatterlist *sg, int nents,
			       int direction,
			       struct dma_attrs *attrs);
	int (*dma_supported_op)(struct device *hwdev, u64 mask);
	int is_phys;
};

extern struct dma_mapping_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	dma_ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

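/*
 * Example usage (an illustrative sketch; the device, size, and ring names
 * are hypothetical).  Note that the wrapper above unconditionally ORs
 * GFP_DMA into the allocation flags.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */
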
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

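/*
 * Example usage of a streaming mapping (an illustrative sketch; 'dev',
 * 'buf', and 'len' are hypothetical):
 *
 *	dma_addr_t buf_dma;
 *
 *	buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, buf_dma))
 *		return -ENOMEM;
 *	... hand buf_dma to the device and wait for the transfer ...
 *	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);
 */
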
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

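/*
 * Example scatter/gather usage (an illustrative sketch; NENTS and the
 * setup of each entry are hypothetical):
 *
 *	struct scatterlist sg[NENTS];
 *	int count;
 *
 *	sg_init_table(sg, NENTS);
 *	... fill each entry with sg_set_page()/sg_set_buf() ...
 *	count = dma_map_sg(dev, sg, NENTS, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device from sg_dma_address()/sg_dma_len() of the
 *	    first 'count' entries ...
 *	dma_unmap_sg(dev, sg, NENTS, DMA_FROM_DEVICE);
 *
 * dma_unmap_sg() takes the original nents, not the count returned by
 * dma_map_sg().
 */
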
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	dma_ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_ops->sync_single_for_device(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sgl,
					  int nents,
					  enum dma_data_direction dir)
{
	dma_ops->sync_sg_for_device(dev, sgl, nents, dir);
}

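/*
 * The sync helpers above pass ownership of a streaming mapping back and
 * forth: call the *_for_cpu variant before the CPU touches the buffer and
 * the *_for_device variant before handing it back to the device, e.g.
 * (illustrative sketch):
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 */
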
static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	return dma_ops->mapping_error(dev, daddr);
}

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

static inline int dma_supported(struct device *dev, u64 mask)
{
	return dma_ops->dma_supported_op(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

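/*
 * Example usage (an illustrative sketch; typically done from a driver's
 * probe routine before any mappings are created):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
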
extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}

#endif /* _ASM_IA64_DMA_MAPPING_H */