/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef __ASM_MACH_JAZZ_DMA_COHERENCE_H
#define __ASM_MACH_JAZZ_DMA_COHERENCE_H

#include <asm/jazzdma.h>

struct device;
15static dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
16{
17 return vdma_alloc(virt_to_phys(addr), size);
18}
19
20static dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
21{
22 return vdma_alloc(page_to_phys(page), PAGE_SIZE);
23}
24
25static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
26{
27 return vdma_log2phys(dma_addr);
28}
29
David Daney843aef42008-12-11 15:33:36 -080030static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
Ralf Baechle9a88cbb2006-11-16 02:56:12 +000031{
32 vdma_free(dma_addr);
33}
34
David Daney843aef42008-12-11 15:33:36 -080035static inline int plat_dma_supported(struct device *dev, u64 mask)
36{
37 /*
38 * we fall back to GFP_DMA when the mask isn't all 1s,
39 * so we can't guarantee allocations that must be
40 * within a tighter range than GFP_DMA..
41 */
42 if (mask < DMA_BIT_MASK(24))
43 return 0;
44
45 return 1;
46}
47
/*
 * Platform hook for extra work on dma_sync_*_for_device(); the Jazz
 * VDMA path needs none, so this is deliberately a no-op.
 */
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
52
53static inline int plat_dma_mapping_error(struct device *dev,
54 dma_addr_t dma_addr)
55{
56 return 0;
57}
58
/*
 * Jazz devices are never cache-coherent with the CPU, so DMA always
 * requires explicit cache maintenance.
 */
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;	/* never coherent on this platform */
}

#endif /* __ASM_MACH_JAZZ_DMA_COHERENCE_H */