blob: 142cb333db29830b3b5740ef8eb36229db9a56af [file] [log] [blame]
Bryan Wu1394f032007-05-06 14:50:22 -07001#ifndef _BFIN_IO_H
2#define _BFIN_IO_H
3
4#ifdef __KERNEL__
5
6#ifndef __ASSEMBLY__
7#include <linux/types.h>
8#endif
9#include <linux/compiler.h>
10
/*
 * These are for ISA/PCI shared memory _only_ and should never be used
 * on any other type of memory.  They access the bus in bus byte order,
 * which is little-endian.
 *
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the bfin architecture, we just read/write the
 * memory location directly.
 */
21#ifndef __ASSEMBLY__
22
/*
 * readb -- read one byte from a memory-mapped I/O address.
 *
 * Interrupts are disabled with "cli" (previous mask saved in %1) for the
 * duration of the load and restored with "sti" afterwards, so the MMIO
 * access cannot be interleaved with an interrupt handler's bus activity.
 * NOTE(review): the "NOP; NOP; SSYNC" sequence before the load is
 * presumably a Blackfin anomaly workaround / bus-drain step -- confirm
 * against the processor errata.
 *
 * Returns the byte at @addr; the "(z)" load zero-extends it into %0.
 */
static inline unsigned char readb(const volatile void __iomem *addr)
{
	unsigned int val;	/* zero-extended value loaded from *addr */
	int tmp;		/* saved interrupt mask for cli/sti */

	__asm__ __volatile__ ("cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = b [%2] (z);\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp): "a"(addr)
		);

	return (unsigned char) val;
}
37
/*
 * readw -- read one 16-bit word from a memory-mapped I/O address.
 *
 * Same interrupt-masked access pattern as readb(): "cli" saves the mask
 * in %1, "sti" restores it, and the 16-bit "(z)" load zero-extends the
 * value into %0.
 * NOTE(review): "NOP; NOP; SSYNC" before the load is presumably an
 * anomaly workaround -- confirm against the processor errata.
 */
static inline unsigned short readw(const volatile void __iomem *addr)
{
	unsigned int val;	/* zero-extended value loaded from *addr */
	int tmp;		/* saved interrupt mask for cli/sti */

	__asm__ __volatile__ ("cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = w [%2] (z);\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp): "a"(addr)
		);

	return (unsigned short) val;
}
52
/*
 * readl -- read one 32-bit word from a memory-mapped I/O address.
 *
 * Same interrupt-masked access pattern as readb()/readw(); the full-width
 * load needs no zero-extension suffix.
 * NOTE(review): "NOP; NOP; SSYNC" before the load is presumably an
 * anomaly workaround -- confirm against the processor errata.
 */
static inline unsigned int readl(const volatile void __iomem *addr)
{
	unsigned int val;	/* value loaded from *addr */
	int tmp;		/* saved interrupt mask for cli/sti */

	__asm__ __volatile__ ("cli %1;\n\t"
		"NOP; NOP; SSYNC;\n\t"
		"%0 = [%2];\n\t"
		"sti %1;\n\t"
		: "=d"(val), "=d"(tmp): "a"(addr)
		);
	return val;
}
66
67#endif /* __ASSEMBLY__ */
68
/*
 * MMIO writes are plain volatile stores.  Unlike the read*() helpers
 * above, no interrupt masking or SSYNC is performed around the store.
 */
#define writeb(b,addr) (void)((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) (void)((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) (void)((*(volatile unsigned int *) (addr)) = (b))

/* The "raw" accessors are identical to the regular ones on this port. */
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
/* I/O memory is directly addressable, so the plain mem* routines suffice. */
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))

/* Port I/O is just MMIO here: in*/out* alias the read*/write* accessors. */
#define inb(addr)    readb(addr)
#define inw(addr)    readw(addr)
#define inl(addr)    readl(addr)
#define outb(x,addr) ((void) writeb(x,addr))
#define outw(x,addr) ((void) writew(x,addr))
#define outl(x,addr) ((void) writel(x,addr))

/* "Pausing" variants add no delay on this port; they alias the plain ones. */
#define inb_p(addr)    inb(addr)
#define inw_p(addr)    inw(addr)
#define inl_p(addr)    inl(addr)
#define outb_p(x,addr) outb(x,addr)
#define outw_p(x,addr) outw(x,addr)
#define outl_p(x,addr) outl(x,addr)

/* Repeated iomap accessors map onto the ins*/outs* helpers declared below. */
#define ioread8_rep(a,d,c)	insb(a,d,c)
#define ioread16_rep(a,d,c)	insw(a,d,c)
#define ioread32_rep(a,d,c)	insl(a,d,c)
#define iowrite8_rep(a,s,c)	outsb(a,s,c)
#define iowrite16_rep(a,s,c)	outsw(a,s,c)
#define iowrite32_rep(a,s,c)	outsl(a,s,c)

/* Single-value iomap accessors alias the MMIO accessors directly. */
#define ioread8(X)		readb(X)
#define ioread16(X)		readw(X)
#define ioread32(X)		readl(X)
#define iowrite8(val,X)		writeb(val,X)
#define iowrite16(val,X)	writew(val,X)
#define iowrite32(val,X)	writel(val,X)

#define IO_SPACE_LIMIT 0xffffffff

/* Values for nocacheflag and cmode */
#define IOMAP_NOCACHE_SER 1
115
116#ifndef __ASSEMBLY__
117
/*
 * String (repeated) port I/O, defined elsewhere in the architecture code:
 * transfer 'count' items of the respective width (8/16/32 bits) between
 * memory at 'addr' and the device register at 'port'.
 */
extern void outsb(void __iomem *port, const void *addr, unsigned short count);
extern void outsw(void __iomem *port, const void *addr, unsigned short count);
extern void outsl(void __iomem *port, const void *addr, unsigned short count);

extern void insb(const void __iomem *port, void *addr, unsigned short count);
extern void insw(const void __iomem *port, void *addr, unsigned short count);
extern void insl(const void __iomem *port, void *addr, unsigned short count);

/*
 * dma_* variants of the same transfers -- presumably performed via the
 * DMA engine rather than PIO; see their definitions for the details.
 */
extern void dma_outsb(void __iomem *port, const void *addr, unsigned short count);
extern void dma_outsw(void __iomem *port, const void *addr, unsigned short count);
extern void dma_outsl(void __iomem *port, const void *addr, unsigned short count);

extern void dma_insb(const void __iomem *port, void *addr, unsigned short count);
extern void dma_insw(const void __iomem *port, void *addr, unsigned short count);
extern void dma_insl(const void __iomem *port, void *addr, unsigned short count);
Bryan Wu1394f032007-05-06 14:50:22 -0700133
134/*
135 * Map some physical address range into the kernel address space.
136 */
137static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
138 int cacheflag)
139{
140 return (void __iomem *)physaddr;
141}
142
/*
 * Unmap an ioremap()ed region again
 */
/*
 * No-op: __ioremap() creates no mapping state, so there is nothing to
 * tear down.
 * NOTE(review): ioremap() returns void __iomem *, but this parameter is
 * plain void *, which drops the __iomem address-space annotation (sparse
 * will warn at call sites) -- consider taking void __iomem * instead.
 */
static inline void iounmap(void *addr)
{
}
149
/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees pointer/page tables, but that
 * wasn't used anyway and might be added back later.
 */
static inline void __iounmap(void *addr, unsigned long size)
{
	/* No-op: __ioremap() creates no per-mapping state to release. */
}
158
159/*
160 * Set new cache mode for some kernel address space.
161 * The caller must push data for that range itself, if such data may already
162 * be in the cache.
163 */
static inline void kernel_set_cachemode(void *addr, unsigned long size,
		int cmode)
{
	/* No-op on this architecture; @addr, @size and @cmode are ignored. */
}
168
/* Map a physical MMIO range; always uses the uncached NOCACHE_SER mode. */
static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
/* Explicitly-uncached mapping; identical to ioremap() on this port. */
static inline void __iomem *ioremap_nocache(unsigned long physaddr,
		unsigned long size)
{
	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
}
178
179extern void blkfin_inv_cache_all(void);
180
181#endif
182
/* No real ioport mapping: the port number already serves as the cookie. */
#define ioport_map(port, nr)		((void __iomem*)(port))
#define ioport_unmap(addr)

/*
 * DMA cache maintenance.  Invalidation flushes the *entire* cache via
 * blkfin_inv_cache_all() rather than just the requested range; writeback
 * is a no-op -- presumably the data cache is configured write-through.
 * NOTE(review): confirm against the cache configuration in use.
 */
#define dma_cache_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)

/* Pages to physical address... */
#define page_to_phys(page)      ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page)       ((page - mem_map) << PAGE_SHIFT)

/* No address translation: virtual, physical and bus addresses coincide. */
#define mm_ptov(vaddr)		((void *) (vaddr))
#define mm_vtop(vaddr)		((unsigned long) (vaddr))
#define phys_to_virt(vaddr)	((void *) (vaddr))
#define virt_to_phys(vaddr)	((unsigned long) (vaddr))

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p
212
213#endif /* __KERNEL__ */
214
215#endif /* _BFIN_IO_H */