#ifndef __ASM_AVR32_IO_H
#define __ASM_AVR32_IO_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <mach/io.h>

/* virt_to_phys will only work when address is in P1 or P2 */
static __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}

static __inline__ void * phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}

#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))

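/*
 * Example (illustrative sketch; "buf" is a hypothetical pointer into the
 * cached P1 segment): obtaining an uncached P2 alias of the same memory
 * via the helpers above:
 *
 *	unsigned long phys = cached_to_phys(buf);
 *	void *uncached_buf = phys_to_uncached(phys);
 */
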
/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);

extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);

static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = v;
}
static inline void __raw_writew(u16 v, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = v;
}
static inline void __raw_writel(u32 v, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = v;
}

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}

/* Convert I/O port address to virtual address */
#ifndef __io
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)

#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}

#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)

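/*
 * The BUILDIO_* invocations above expand into the standard accessors:
 * readb/readw/readl and writeb/writew/writel for MMIO, inb/inw/inl and
 * outb/outw/outl (plus the _p variants, which insert SLOW_DOWN_IO) for
 * port I/O, and __mem_-prefixed versions of all of them, which are used
 * by the string helpers further down.
 */
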
#define readb_relaxed		readb
#define readw_relaxed		readw
#define readl_relaxed		readl

#define readb_be		__raw_readb
#define readw_be		__raw_readw
#define readl_be		__raw_readl

#define writeb_be		__raw_writeb
#define writew_be		__raw_writew
#define writel_be		__raw_writel

#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}

#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			    unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}

#define BUILDSTRING(bwl, type)						\
	__BUILD_MEMORY_STRING(bwl, type)				\
	__BUILD_IOPORT_STRING(bwl, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
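
/*
 * BUILDSTRING() generates readsb/readsw/readsl, writesb/writesw/writesl,
 * insb/insw/insl and outsb/outsw/outsl.  Example (illustrative sketch;
 * "fifo" and "buf" are hypothetical): draining 16 halfwords from a
 * device FIFO register into a buffer:
 *
 *	u16 buf[16];
 *	readsw(fifo, buf, ARRAY_SIZE(buf));
 */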

/*
 * io{read,write}{8,16,32} macros in both little-endian (for PCI-style
 * consumers) and native big-endian variants.
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif

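/*
 * Example (illustrative sketch; "regs" is a hypothetical ioremap()
 * cookie): AVR32 is big-endian, so on-chip registers would typically be
 * accessed with the "be" variants, while little-endian (PCI-style)
 * devices use the plain forms:
 *
 *	u32 native_val = ioread32be(regs + 0x00);
 *	iowrite32(0x1, regs + 0x10);
 */
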
static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
				 unsigned long count)
{
	memcpy(to, (const void __force *)from, count);
}

static inline void memcpy_toio(volatile void __iomem *to, const void * from,
			       unsigned long count)
{
	memcpy((void __force *)to, from, count);
}

static inline void memset_io(volatile void __iomem *addr, unsigned char val,
			     unsigned long count)
{
	memset((void __force *)addr, val, count);
}

#define mmiowb()

#define IO_SPACE_LIMIT	0xffffffff

extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);

/*
 * ioremap - map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers. The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

#define ioremap_nocache(offset, size)		\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)
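
/*
 * Example (illustrative sketch; the base address and register offsets
 * below are made up): mapping a memory-mapped peripheral and accessing
 * it through the returned cookie:
 *
 *	void __iomem *regs = ioremap(0xfff02000, 0x400);
 *	if (regs) {
 *		u32 status = readl(regs + 0x04);
 *		writel(status | 0x1, regs + 0x08);
 *		iounmap(regs);
 *	}
 */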

#define cached(addr)	P1SEGADDR(addr)
#define uncached(addr)	P2SEGADDR(addr)

#define virt_to_bus	virt_to_phys
#define bus_to_virt	phys_to_virt
#define page_to_bus	page_to_phys
#define bus_to_page	phys_to_page

/*
 * Create a virtual mapping cookie for an IO port range. There exists
 * no such thing as port-based I/O on AVR32, so a regular ioremap()
 * should do what we need.
 */
#define ioport_map(port, nr)	ioremap(port, nr)
#define ioport_unmap(port)	iounmap(port)

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __ASM_AVR32_IO_H */