#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>

#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif

#define DMA_ERROR_CODE	0

extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

/*
 * Return the DMA operations for a device: the per-device override in
 * dev->archdata.dma_ops when CONFIG_X86_DEV_DMA_OPS is enabled and set,
 * otherwise the global dma_ops.
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifndef CONFIG_X86_DEV_DMA_OPS
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}
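
/*
 * Illustrative sketch, not part of this header: drivers normally reach
 * these ops through the generic dma_map_*() wrappers rather than
 * calling get_dma_ops() directly. Assuming a valid struct device *dev
 * and a struct page *page to map:
 *
 *      struct dma_map_ops *ops = get_dma_ops(dev);
 *      dma_addr_t handle = ops->map_page(dev, page, 0, size,
 *                                        DMA_TO_DEVICE, NULL);
 */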

bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs

#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag,
                                        struct dma_attrs *attrs);

extern void dma_generic_free_coherent(struct device *dev, size_t size,
                                      void *vaddr, dma_addr_t dma_addr,
                                      struct dma_attrs *attrs);

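/*
 * Illustrative sketch, not part of this header: drivers use the generic
 * dma_alloc_coherent()/dma_free_coherent() wrappers, which can end up
 * in dma_generic_alloc_coherent() when no IOMMU claims the device.
 *
 *      dma_addr_t dma_handle;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */
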
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#else

/*
 * Check that the whole [addr, addr + size - 1] range falls within the
 * device's DMA mask.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
        if (!dev->dma_mask)
                return 0;

        return addr + size - 1 <= *dev->dma_mask;
}
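
/*
 * Worked example (hypothetical values): for a device with a 32-bit mask
 * (*dev->dma_mask == 0xffffffff), a buffer at addr 0xfffff000 of size
 * 0x2000 ends at 0x100000fff, which exceeds the mask, so dma_capable()
 * returns false; the same buffer at 0x7ffff000 ends at 0x80000fff and
 * is allowed.
 */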

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
        return daddr;
}
#endif /* CONFIG_X86_DMA_REMAP */

/*
 * x86 is cache-coherent with respect to DMA, so no cache maintenance is
 * needed here; flushing the CPU write buffers is sufficient.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}
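
/*
 * Example (hypothetical device): with dev->coherent_dma_mask == 0 and
 * GFP_DMA set in gfp, the fallback mask is DMA_BIT_MASK(24), i.e. the
 * ISA-style 16 MB limit; without GFP_DMA it is DMA_BIT_MASK(32).
 */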

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_BIT_MASK(24))
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
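
/*
 * Worked example (hypothetical device): on x86_64, a device with
 * dev->coherent_dma_mask == DMA_BIT_MASK(32) that passes GFP_KERNEL
 * gets GFP_KERNEL | GFP_DMA32 back, steering the allocation below
 * 4 GB; a 24-bit mask would get GFP_DMA instead.
 */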

#endif