#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

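/*
 * Return the DMA operations for a device: on 64-bit, per-device ops from
 * dev->archdata take precedence; otherwise fall back to the global dma_ops.
 */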
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

#include <asm-generic/dma-mapping-common.h>

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

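/*
 * x86 DMA is cache-coherent, so syncing a consistent mapping only needs to
 * drain the CPU write buffers.
 */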
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

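/*
 * Mask used for coherent allocations: the device's coherent_dma_mask if set,
 * otherwise 24 bits for GFP_DMA requests and 32 bits for everything else.
 */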
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}

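/*
 * Pick the GFP zone matching the coherent mask: GFP_DMA for masks that fit
 * in 24 bits, GFP_DMA32 (64-bit only) for masks that fit in 32 bits.
 */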
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_BIT_MASK(24))
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

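/*
 * Allocate a coherent buffer: try any per-device coherent pool first, then
 * hand the request to the DMA ops with zone flags derived from the device's
 * coherent mask. Callers without a struct device get x86_dma_fallback_dev.
 */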
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

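/*
 * Release a buffer from dma_alloc_coherent(): return it to the per-device
 * pool when it came from there, otherwise free it through the DMA ops.
 * Calling this with interrupts disabled triggers a warning (not portable).
 */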
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif