blob: 6a902751cc7f79034b802821857020a7386536e7 [file] [log] [blame]
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
 * Author: Fuxin Zhang, zhangfx@lemote.com
 *
 */
Wu Zhangjin8e497112009-07-02 23:26:08 +080011#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
12#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H
Songmao Tian42d226c2007-06-06 14:52:38 +080013
Huacai Chen1299b0e2014-03-21 18:44:06 +080014#ifdef CONFIG_SWIOTLB
15#include <linux/swiotlb.h>
16#endif
17
Songmao Tian42d226c2007-06-06 14:52:38 +080018struct device;
19
Huacai Chen1299b0e2014-03-21 18:44:06 +080020extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
21extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
Songmao Tian42d226c2007-06-06 14:52:38 +080022static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
23 size_t size)
24{
Huacai Chen1299b0e2014-03-21 18:44:06 +080025#ifdef CONFIG_CPU_LOONGSON3
26 return virt_to_phys(addr);
27#else
Songmao Tian42d226c2007-06-06 14:52:38 +080028 return virt_to_phys(addr) | 0x80000000;
Huacai Chen1299b0e2014-03-21 18:44:06 +080029#endif
Songmao Tian42d226c2007-06-06 14:52:38 +080030}
31
32static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
33 struct page *page)
34{
Huacai Chen1299b0e2014-03-21 18:44:06 +080035#ifdef CONFIG_CPU_LOONGSON3
36 return page_to_phys(page);
37#else
Songmao Tian42d226c2007-06-06 14:52:38 +080038 return page_to_phys(page) | 0x80000000;
Huacai Chen1299b0e2014-03-21 18:44:06 +080039#endif
Songmao Tian42d226c2007-06-06 14:52:38 +080040}
41
Kevin Cernekee3807ef3f62009-04-23 17:25:12 -070042static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
43 dma_addr_t dma_addr)
Songmao Tian42d226c2007-06-06 14:52:38 +080044{
Huacai Chen1299b0e2014-03-21 18:44:06 +080045#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
46 return dma_addr;
47#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
Wu Zhangjin6f7a2512009-11-06 18:45:05 +080048 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
49#else
Songmao Tian42d226c2007-06-06 14:52:38 +080050 return dma_addr & 0x7fffffff;
Wu Zhangjin6f7a2512009-11-06 18:45:05 +080051#endif
Songmao Tian42d226c2007-06-06 14:52:38 +080052}
53
/*
 * Platform hook invoked when a streaming DMA mapping is torn down.
 * Intentionally a no-op on Loongson: no per-mapping platform state is
 * kept, so there is nothing to release or synchronize here.
 */
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}
58
David Daney843aef42008-12-11 15:33:36 -080059static inline int plat_dma_supported(struct device *dev, u64 mask)
60{
61 /*
62 * we fall back to GFP_DMA when the mask isn't all 1s,
63 * so we can't guarantee allocations that must be
64 * within a tighter range than GFP_DMA..
65 */
66 if (mask < DMA_BIT_MASK(24))
67 return 0;
68
69 return 1;
70}
71
/*
 * Report whether DMA on @dev is cache-coherent for this build:
 * 0 when the kernel is configured with CONFIG_DMA_NONCOHERENT,
 * 1 otherwise.  Compile-time constant; @dev is ignored.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
#ifndef CONFIG_DMA_NONCOHERENT
	return 1;
#else
	return 0;
#endif
}
80
Wu Zhangjin8e497112009-07-02 23:26:08 +080081#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */