/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_DMA_MAPPING_H
#define _ASM_TILE_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/cache.h>
#include <linux/io.h>

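/* tilegx supplies its own dma_get_required_mask() instead of the generic default. */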
#ifdef __tilegx__
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#endif

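/*
 * DMA operation sets: tile_dma_map_ops is the default; the gx_*_pci
 * variants are chosen for tilegx PCI devices according to their
 * addressing capability (see dma_set_mask() below).
 */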
extern struct dma_map_ops *tile_dma_map_ops;
extern struct dma_map_ops *gx_pci_dma_map_ops;
extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;

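/* Use the per-device DMA ops if installed, else the architecture default. */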
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return tile_dma_map_ops;
}

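/*
 * Accessors for the per-device offset added to physical addresses to
 * form DMA addresses; zero for directly-mapped devices.
 */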
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	return dev->archdata.dma_offset;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	dev->archdata.dma_offset = off;
}

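/* On tile the generic phys/DMA translation is the identity mapping. */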
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

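/* Nothing to do on tile; this hook is used by swiotlb on some architectures. */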
static inline void dma_mark_clean(void *addr, size_t size) {}

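/* Install device-specific DMA ops, overriding the get_dma_ops() default. */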
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

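/*
 * Report whether the range [addr, addr + size - 1] is reachable
 * within the device's DMA mask.
 */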
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}

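/*
 * Tell <asm-generic/dma-mapping-common.h> not to define the generic
 * dma_set_mask(); tile provides its own version below.
 */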
#define HAVE_ARCH_DMA_SET_MASK 1

#include <asm-generic/dma-mapping-common.h>

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	/*
	 * For PCI devices with 64-bit DMA addressing capability, promote
	 * the dma_ops to hybrid, with the consistent memory DMA space limited
	 * to 32-bit. For 32-bit capable devices, limit the streaming DMA
	 * address range to max_direct_dma_addr.
	 */
	if (dma_ops == gx_pci_dma_map_ops ||
	    dma_ops == gx_hybrid_pci_dma_map_ops ||
	    dma_ops == gx_legacy_pci_dma_map_ops) {
		if (mask == DMA_BIT_MASK(64) &&
		    dma_ops == gx_legacy_pci_dma_map_ops)
			set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
		else if (mask > dev->archdata.max_direct_dma_addr)
			mask = dev->archdata.max_direct_dma_addr;
	}

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
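
/*
 * Example (hypothetical driver code, not part of this header): a tilegx
 * PCI driver would typically negotiate its mask at probe time, falling
 * back from 64-bit to 32-bit addressing:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * A successful 64-bit request on a device using the legacy PCI ops
 * promotes it to gx_hybrid_pci_dma_map_ops, as implemented above.
 */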

/*
 * dma_alloc_noncoherent() is #defined to return coherent memory,
 * so there's no need to do any flushing here.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction direction)
{
}

#endif /* _ASM_TILE_DMA_MAPPING_H */