blob: 22c97ef92201bff82b5a7bc7a6fe139147c014a4 [file] [log] [blame]
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -07001#ifndef __ASM_AVR32_IO_H
2#define __ASM_AVR32_IO_H
3
Haavard Skinnemoene3e7d8d2007-02-12 16:28:56 +01004#include <linux/kernel.h>
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -07005#include <linux/string.h>
Haavard Skinnemoene3e7d8d2007-02-12 16:28:56 +01006#include <linux/types.h>
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -07007
8#include <asm/addrspace.h>
9#include <asm/byteorder.h>
10
Haavard Skinnemoen3663b732008-08-05 13:57:38 +020011#include <mach/io.h>
Haavard Skinnemoene3e7d8d2007-02-12 16:28:56 +010012
/*
 * virt_to_phys - translate a kernel virtual address to a physical one.
 *
 * Only works when @address lies in the P1 (cached) or P2 (uncached)
 * segment; PHYSADDR() (from <asm/addrspace.h>) is expected to strip
 * the segment offset, so other addresses yield an undefined result.
 *
 * Uses plain `inline` for consistency with the rest of this file
 * (the old `__inline__` spelling is equivalent under GCC).
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}
18
/*
 * phys_to_virt - map a physical address to a cached (P1 segment)
 * kernel virtual address.  Inverse of virt_to_phys() for addresses
 * that came from the P1 segment.
 *
 * Uses plain `inline` for consistency with the rest of this file.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
23
/*
 * Translation helpers between physical addresses and the cached (P1)
 * and uncached (P2) kernel segments.  cached_to_phys() and
 * uncached_to_phys() both reduce to PHYSADDR(), which handles either
 * source segment.
 */
#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
28
/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
/*
 * Repeated ("string") MMIO transfers, implemented out of line
 * elsewhere in the architecture code: move bytelen/wordlen/longlen
 * elements between the memory buffer at 'data' and the I/O location
 * 'addr'.  NOTE(review): presumably 'addr' is a fixed device register
 * that is not incremented — confirm against the implementation.
 */
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);

extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -070040
/* Raw 8-bit MMIO store: single volatile write, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = v;
}
/* Raw 16-bit MMIO store: single volatile write, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline void __raw_writew(u16 v, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = v;
}
/* Raw 32-bit MMIO store: single volatile write, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline void __raw_writel(u32 v, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = v;
}
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -070053
/* Raw 8-bit MMIO load: single volatile read, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
/* Raw 16-bit MMIO load: single volatile read, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
/* Raw 32-bit MMIO load: single volatile read, no barriers, no
 * byteswap.  The __force cast strips the sparse __iomem annotation. */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -070066
/* Convert I/O port address to virtual address.  <mach/io.h> (included
 * above) may provide a platform-specific __io(); this is the fallback:
 * treat the port number as a physical address in the uncached (P2)
 * segment. */
#ifndef __io
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -070077
/*
 * __BUILD_MEMORY_SINGLE - template generating the {pfx}write{bwl}()
 * and {pfx}read{bwl}() MMIO accessors for one access width.
 *
 * __swizzle_addr_##bwl and pfx##ioswab##bwl are platform hooks
 * (expected from <mach/io.h>) that let a board fix up the address
 * and the byte order per access size.  The BUILD_BUG_ON rejects
 * instantiation with a type wider than a machine word.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}
105
/*
 * __BUILD_IOPORT_SINGLE - template generating the {pfx}out{bwl}{p}()
 * and {pfx}in{bwl}{p}() port accessors for one access width.  Ports
 * are mapped to MMIO addresses through __io(); 'slow' is a statement
 * run after the access (SLOW_DOWN_IO for the *_p variants, empty
 * otherwise).
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}
135
/*
 * Instantiate the templates above for each width (b/w/l) and for two
 * prefixes: the empty prefix gives the public readb()/writeb()/outb()
 * etc., while the __mem_ prefix gives the __mem_* variants used by
 * the string accessors further down.
 */
#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
158
/* The _relaxed variants are plain aliases: no weaker ordering is
 * implemented here, they are simply the normal accessors. */
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl

/* Big-endian accessors alias the raw (native-endian, no-swab)
 * accessors — AVR32 is a big-endian architecture. */
#define readb_be			__raw_readb
#define readw_be			__raw_readw
#define readl_be			__raw_readl

#define writeb_be			__raw_writeb
#define writew_be			__raw_writew
#define writel_be			__raw_writel
170
/*
 * Templates for the "string" accessors: transfer 'count' elements
 * between a memory buffer and a single MMIO location / I/O port.
 * They loop over the __mem_* accessors generated above, so any
 * platform address/byte-order fixups apply to every element.  Note
 * that the device address/port is deliberately NOT incremented.
 */
#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}

#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			    unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}

#define BUILDSTRING(bwl, type)						\
	__BUILD_MEMORY_STRING(bwl, type)				\
	__BUILD_IOPORT_STRING(bwl, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
Ben Nizette065834a2006-10-24 10:12:43 +0200216
/*
 * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
 *
 * The plain variants go through readb()/writeb() etc. (which apply
 * the platform ioswab hooks); the *be variants use the raw native
 * (big-endian) accessors directly.  Guarded so a platform header can
 * provide its own definitions first.
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

/* Repeated transfers map straight onto the string accessors above. */
#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif
247
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -0700248static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
249 unsigned long count)
250{
Haavard Skinnemoen2c1a2a32007-03-07 10:40:44 +0100251 memcpy(to, (const void __force *)from, count);
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -0700252}
253
254static inline void memcpy_toio(volatile void __iomem *to, const void * from,
255 unsigned long count)
256{
Haavard Skinnemoen2c1a2a32007-03-07 10:40:44 +0100257 memcpy((void __force *)to, from, count);
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -0700258}
259
260static inline void memset_io(volatile void __iomem *addr, unsigned char val,
261 unsigned long count)
262{
Haavard Skinnemoen2c1a2a32007-03-07 10:40:44 +0100263 memset((void __force *)addr, val, count);
Haavard Skinnemoene3e7d8d2007-02-12 16:28:56 +0100264}
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -0700265
/* No posted-write buffer to flush here, so mmiowb() is a no-op. */
#define mmiowb()

/* Port numbers are full 32-bit MMIO addresses on this architecture. */
#define IO_SPACE_LIMIT	0xffffffff

/* Backend mapping primitives, defined out of line; 'flags' selects
 * mapping attributes (0 is used for both cached and nocache below —
 * NOTE(review): presumably the default mapping is already uncached,
 * confirm against the __ioremap implementation). */
extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);

/*
 * ioremap	-   map bus memory into CPU space
 * @offset	bus address of the memory
 * @size	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers. The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

/* Identical to ioremap(): same flags, see NOTE above. */
#define ioremap_nocache(offset, size)		\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)

/* Re-segment an address into the cached (P1) / uncached (P2) view. */
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)

/* Bus addresses are plain physical addresses here. */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page

/*
 * Create a virtual mapping cookie for an IO port range. There exists
 * no such thing as port-based I/O on AVR32, so a regular ioremap()
 * should do what we need.
 */
#define ioport_map(port, nr)	ioremap(port, nr)
#define ioport_unmap(port)	iounmap(port)

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)   p
319
Haavard Skinnemoen5f97f7f2006-09-25 23:32:13 -0700320#endif /* __ASM_AVR32_IO_H */