/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
 *
 */
#ifndef __ASM_MACH_IP32_DMA_COHERENCE_H
#define __ASM_MACH_IP32_DMA_COHERENCE_H

#include <asm/ip32/crime.h>

struct device;
/*
 * A few notes:
 * 1. The CPU sees memory as two chunks: 0-256M @ 0x0, and the rest at
 *    0x40000000 + 256M.
 * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000
 *    for native-endian).
 * 3. All other devices see memory as one big chunk at 0x40000000.
 * 4. Non-PCI devices will pass NULL as struct device *.
 *
 * Thus we translate differently, depending on the device.
 */

#define RAM_OFFSET_MASK	0x3fffffffUL

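/*
 * Translate a kernel virtual address to a bus address for DMA. The mask
 * reduces the CPU physical address to its offset within RAM; PCI devices
 * use that offset directly (they see RAM at bus 0x0), while all other
 * devices (dev == NULL) see RAM at CRIME_HI_MEM_BASE and get that added
 * back. Worked example, assuming CRIME_HI_MEM_BASE is 0x40000000 as
 * defined in crime.h: CPU physical 0x50001000 masks down to 0x10001000,
 * which is the bus address for PCI; any other device uses 0x50001000.
 */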
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	dma_addr_t pa = virt_to_phys(addr) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

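/*
 * Same translation as plat_map_dma_mem(), but starting from a struct page
 * rather than a kernel virtual address.
 */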
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	dma_addr_t pa;

	pa = page_to_phys(page) & RAM_OFFSET_MASK;

	if (dev == NULL)
		pa += CRIME_HI_MEM_BASE;

	return pa;
}

/* This is almost certainly wrong but it's what dma-ip32.c used to use */
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = dma_addr & RAM_OFFSET_MASK;

	if (dma_addr >= 256*1024*1024)
		addr += CRIME_HI_MEM_BASE;

	return addr;
}
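/*
 * Nothing to undo on unmap: the mapping functions above only compute
 * addresses and allocate no resources, and cache maintenance for this
 * non-coherent platform is handled by the generic MIPS DMA code.
 */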
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}
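/*
 * Hook for platforms that need extra work (e.g. draining a write buffer)
 * when syncing for the device; IP32 needs none, so this is a no-op.
 */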
static inline void plat_extra_sync_for_device(struct device *dev)
{
}
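/*
 * Address translation on this platform cannot fail, so no DMA address is
 * ever reported as a mapping error.
 */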
static inline int plat_dma_mapping_error(struct device *dev,
	dma_addr_t dma_addr)
{
	return 0;
}

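/*
 * Tells the generic MIPS DMA code whether it may skip cache maintenance;
 * returning 0 forces explicit cache ops around every DMA transfer.
 */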
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;		/* IP32 is non-coherent */
}

#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */