/* Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for ppc64. Handles
 * the PCI and VIO buses.
 */

#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/bug.h>

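/*
 * All-ones is never a valid bus address, so the mapping routines
 * return it to signal failure; drivers test for it with
 * dma_mapping_error() below.
 */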
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
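
/*
 * Example (sketch, not part of this header): a driver probe routine
 * typically negotiates a DMA mask and then allocates a coherent
 * descriptor ring. mydev_probe and MYDEV_RING_BYTES are hypothetical.
 *
 *	static int mydev_probe(struct device *dev)
 *	{
 *		dma_addr_t ring_bus;
 *		void *ring;
 *
 *		if (dma_set_mask(dev, 0xffffffffULL))
 *			return -EIO;
 *		ring = dma_alloc_coherent(dev, MYDEV_RING_BYTES,
 *					  &ring_bus, GFP_KERNEL);
 *		if (!ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */
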
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);

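/*
 * The routines above are implemented out of line; each bus supplies a
 * struct dma_mapping_ops (see the end of this file) that the arch code
 * dispatches to, e.g. direct mapping vs. an iommu.
 *
 * The sync routines below, by contrast, can be inline no-ops: ppc64
 * hardware keeps DMA coherent with the processor caches, so there is
 * no flushing to do and only the direction argument is sanity-checked.
 */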
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}
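
/*
 * Example (sketch): checking a streaming mapping for failure. buf and
 * len are hypothetical.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 */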

/* Now for the API extensions over the pci_ ones */

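/*
 * The hardware is coherent, so the "noncoherent" variants can simply
 * alias the coherent ones and consistency is unconditional.
 */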
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d) (1)

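/*
 * Drivers use this alignment to pad streaming buffers so that
 * unrelated data never shares a cache line with an in-flight DMA
 * buffer.
 */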
static inline int
dma_get_cache_alignment(void)
{
	/* There is no easy way to get the cache size on all
	 * processors, so return the maximum possible to be safe. */
	return (1 << L1_CACHE_SHIFT_MAX);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	/* nothing to do */
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void * (*alloc_coherent)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *ptr,
			size_t size, enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			size_t size, enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*dac_dma_supported)(struct device *dev, u64 mask);
};
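
/*
 * Example (sketch): a bus whose devices map through an iommu would
 * fill in one of these tables with its own routines; the iommu_*
 * names are hypothetical.
 *
 *	static struct dma_mapping_ops iommu_dma_ops = {
 *		.alloc_coherent	= iommu_alloc_coherent,
 *		.free_coherent	= iommu_free_coherent,
 *		.map_single	= iommu_map_single,
 *		.unmap_single	= iommu_unmap_single,
 *		.map_sg		= iommu_map_sg,
 *		.unmap_sg	= iommu_unmap_sg,
 *		.dma_supported	= iommu_dma_supported,
 *	};
 */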

#endif /* _ASM_DMA_MAPPING_H */