#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

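/* The architecture supplies its own dma_get_required_mask() implementation. */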
#define ARCH_HAS_DMA_GET_REQUIRED_MASK

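/* Bus address returned by the mapping ops to flag a failed mapping. */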
#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

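/* DMA sync helpers shared by the ia64 machine vectors. */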
extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

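/* The dma_map_ops in use are selected by the platform's machine vector. */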
#define get_dma_ops(dev) platform_dma_get_ops(dev)

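/* True if the range [addr, addr + size - 1] fits within the device's DMA mask. */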
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return 0;

	return addr + size - 1 <= *dev->dma_mask;
}

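/* DMA (bus) addresses are identical to physical addresses on ia64. */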
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#endif /* _ASM_IA64_DMA_MAPPING_H */