#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 */
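/*
 * Illustrative sketch of the convention above (not part of this API);
 * 'regs' stands for a hypothetical cookie returned by ioremap():
 *
 *	u32 v = readl(regs + 0x04);		- barriered, LE-converted
 *	u32 w = __raw_readl(regs + 0x04);	- no barrier, native order
 *	writel(v, regs + 0x08);			- wmb() before the store
 */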
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = __raw_readb(c); __v; })
#define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64) \
					__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb(v,c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
					cpu_to_le16(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
					cpu_to_le32(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64) \
					cpu_to_le64(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

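/*
 * For reference, the 'l' instantiation above yields (sketch):
 *
 *	u32 v = readl_uncached(addr);	- read bypassing the cache
 *	writel_uncached(v, addr);	- write likewise
 *
 * both bracketed by jump_to_uncached()/back_to_cached().
 */
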
#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)

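/*
 * Illustrative use of the string helpers, assuming a hypothetical
 * 'fifo' register mapping and a driver buffer 'buf':
 *
 *	u16 buf[64];
 *	__raw_readsw(fifo, buf, 64);	- drain 64 words from the FIFO
 *	__raw_writesw(fifo, buf, 64);	- write them back out
 */
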
#ifdef CONFIG_HAS_IOPORT

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern const unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

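/*
 * Board setup code is expected to establish the port mapping early on,
 * e.g. (illustrative; 'base' is a board-specific virtual address):
 *
 *	__set_io_port_base(base);
 */
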
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

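/*
 * The builders above expand into the usual inb()/outb(), inw()/outw(),
 * inl()/outl() and inq()/outq() families, plus their _p 'pausing'
 * variants. Illustrative use on a hypothetical 'port':
 *
 *	outb(0x0a, port);	- plain store to the mapped port
 *	outb_p(0x0a, port);	- same, followed by SLOW_DOWN_IO
 */
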
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)

#endif /* CONFIG_HAS_IOPORT */

/*
 * Legacy SuperH on-chip I/O functions
 *
 * These are all deprecated, all new (and especially cross-platform) code
 * should be using the __raw_xxx() routines directly.
 */
static inline u8 __deprecated ctrl_inb(unsigned long addr)
{
	return __raw_readb(addr);
}

static inline u16 __deprecated ctrl_inw(unsigned long addr)
{
	return __raw_readw(addr);
}

static inline u32 __deprecated ctrl_inl(unsigned long addr)
{
	return __raw_readl(addr);
}

static inline u64 __deprecated ctrl_inq(unsigned long addr)
{
	return __raw_readq(addr);
}

static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
{
	__raw_writeb(v, addr);
}

static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
{
	__raw_writew(v, addr);
}

static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
{
	__raw_writel(v, addr);
}

static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
{
	__raw_writeq(v, addr);
}

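/*
 * Migration sketch for the deprecated helpers above; 'reg' stands for
 * a hypothetical on-chip register address:
 *
 *	old: v = ctrl_inw(reg);		new: v = __raw_readw(reg);
 *	old: ctrl_outw(v, reg);		new: __raw_writew(v, reg);
 */
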
#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()	wmb()

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

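/*
 * Worked example for the 29-bit case (illustrative): a cacheable
 * request for physical 0x0c000000 yields the P1 alias 0x8c000000,
 * while an uncached one yields the P2 alias 0xac000000.
 */
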
static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

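/*
 * Typical driver usage (illustrative; 'res' is an assumed struct
 * resource describing the device's register window):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */
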
#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */