/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc, covering the
 * pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#endif

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag,
					 unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping. These allocate/free a region of uncached mapped
 * memory space for use with DMA devices. Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
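
/*
 * Illustrative sketch only, not part of the original header: on a
 * non-coherent platform a buffer shared with a device is typically either
 * taken from the uncached pool or kept cacheable and flushed by hand
 * around each transfer.  The names "buf", "len" and "handle" below are
 * made up for the example.
 *
 *	void *buf = __dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
 *	...
 *	__dma_free_coherent(len, buf);
 *
 * or, keeping ordinary cacheable memory and managing the cache by hand:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);    (before the device reads it)
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);  (after the device has written)
 */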

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern const struct dma_map_ops dma_direct_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	return NULL;
}

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_offset = off;
}
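
/*
 * Illustrative sketch only, not part of the original header: platform setup
 * code that knows its host bridge presents system RAM at a fixed bus offset
 * could record that offset per device; the direct ops then translate by
 * simple addition (see phys_to_dma()/dma_to_phys() below).  "pdev" and the
 * 0x80000000 offset are made-up values.
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *	...
 *	dma_addr_t bus = phys_to_dma(&pdev->dev, paddr);
 *	(bus == paddr + 0x80000000, and dma_to_phys() undoes it)
 */
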
/* this will be removed soon */
#define flush_write_buffers()

#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);

extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
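
/*
 * Worked example (illustrative only, not part of the original header): with
 * a 32-bit mask of 0xffffffff, a mapping at addr 0xfffff000 of size 0x1000
 * ends at 0xffffffff and is accepted, while size 0x1001 ends at 0x100000000
 * and is rejected.  The "- 1" keeps a mapping ending exactly on the mask
 * boundary legal.
 */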

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

#define ARCH_HAS_DMA_MMAP_COHERENT

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */