#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))
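
/*
 * Note: __chk_io_ptr() and the __force casts above are sparse
 * annotations only and compile away entirely.  __chk_io_ptr() checks
 * that the argument really is a void __iomem pointer, while __force
 * silences the address-space warning when it is then dereferenced as
 * a plain volatile pointer.
 */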

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })
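
/*
 * In this scheme readX()/writeX() are the fully ordered accessors
 * (barrier plus byte-order fixup through the mach-specific ioswab*()
 * hooks), the _relaxed variants drop the barrier but keep the
 * swapping, and the __raw variants drop both.  A minimal sketch,
 * assuming a hypothetical device with a 32-bit doorbell register
 * (DEV_PHYS_BASE and the REG_* offsets are made up for illustration):
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, DEV_REG_SIZE);
 *
 *	writel_relaxed(len, regs + REG_LEN);	// descriptor setup
 *	writel(1, regs + REG_DOORBELL);		// wmb() orders this last
 */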

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
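
/*
 * The expansions above provide read{b,w,l,q}_uncached() and
 * write{b,w,l,q}_uncached(), which briefly switch execution to the
 * uncached shadow mapping (jump_to_uncached()/back_to_cached())
 * around a single __raw access, for the handful of registers that
 * must not be touched through a cached translation.
 */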

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,	\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
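
/*
 * This generates the __raw_{reads,writes}{b,w,q}() string accessors:
 * repeated same-size transfers against a single device register,
 * advancing only the memory-side pointer.  On SuperH-32 the 32-bit
 * pair is provided out of line (prototypes above) so it can use an
 * optimized implementation rather than the generic loop.
 */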

#ifdef CONFIG_HAS_IOPORT

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
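
/*
 * The expansions above yield in{b,w,l,q}()/out{b,w,l,q}() plus their
 * pausing in*_p()/out*_p() counterparts (which only differ when
 * CONF_SLOWDOWN_IO is defined).  Because SuperH port I/O is memory
 * mapped, an ISA-style access such as
 *
 *	outb(0x03, port);	// 'port' is an illustrative port number
 *
 * becomes a plain store through the __ioport_map() translation.
 */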

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
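
/*
 * ins{b,w,l,q}()/outs{b,w,l,q}() transfer a buffer one element at a
 * time against a fixed port, e.g. draining a 16-bit data register:
 *
 *	u16 buf[256];
 *	insw(data_port, buf, ARRAY_SIZE(buf));	// data_port is illustrative
 */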

#else /* !CONFIG_HAS_IOPORT */

#include <asm/io_noioport.h>

#endif

#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()	wmb()

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
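
/*
 * __builtin_return_address(0) records the call site, so the mapping is
 * attributed to its creator (e.g. in /proc/vmallocinfo) rather than to
 * this wrapper.
 */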

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}
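
/*
 * In other words, on a 29-bit part a physical address below P3SEG just
 * has the fixed segment offset folded in: a cacheable request lands in
 * P1 (0x80000000 + offset), an uncached one in P2 (0xa0000000 + offset),
 * with no page tables involved.  Returning NULL hands the request back
 * to __ioremap_mode() for a real page table mapping instead.
 */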

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
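
/*
 * Mapping requests are thus tried in order of increasing cost: a
 * trapped (emulated) I/O window first, then a direct 29-bit segment
 * translation, and only as a last resort a page table mapping via
 * __ioremap_caller().
 */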
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */