/*
 * linux/arch/unicore32/include/asm/dma-mapping.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>

extern struct dma_map_ops swiotlb_dma_map_ops;

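/*
 * All DMA on UniCore32 is routed through the swiotlb implementation:
 * get_dma_ops() unconditionally returns the swiotlb operations table,
 * and the helpers below simply delegate to it.
 */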
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &swiotlb_dma_map_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	return dma_ops->dma_supported(dev, mask);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

	return 0;
}

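/*
 * Pull in the generic dma_map_*()/dma_sync_*() wrappers, which dispatch
 * through the dma_map_ops returned by get_dma_ops() above.
 */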
#include <asm-generic/dma-mapping-common.h>

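/*
 * A buffer is DMA-capable if its highest byte still falls within the
 * device's DMA mask; devices without a mask are assumed to reach all
 * of memory.
 */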
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (dev && dev->dma_mask)
		return addr + size - 1 <= *dev->dma_mask;

	return 1;
}

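/*
 * DMA (bus) addresses and CPU physical addresses are identical on this
 * platform, so both conversions are 1:1.
 */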
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

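/* dma_mark_clean() has nothing to do on this architecture. */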
static inline void dma_mark_clean(void *addr, size_t size) {}

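/*
 * Update the device's DMA mask after checking that the swiotlb backend
 * can actually support it.
 */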
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

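/*
 * Perform CPU cache maintenance so that a buffer handed to (or received
 * from) a device is coherent: writeback only for DMA_TO_DEVICE, writeback
 * plus invalidate for DMA_FROM_DEVICE and DMA_BIDIRECTIONAL.
 */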
static inline void dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		__cpuc_dma_flush_range(start, end);
		break;
	case DMA_TO_DEVICE:	/* writeback only */
		__cpuc_dma_clean_range(start, end);
		break;
	}
}

#endif /* __KERNEL__ */
#endif