/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
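/*
 * Illustrative sketch only (the register offsets, CTRL_ENABLE and 'res'
 * below are made-up names, not part of this header): a driver that has
 * ioremap()'d its registers would normally use the barrier-inserting
 * accessors, and reach for the __raw_ variants only when it handles
 * ordering and byte order itself:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 stat = readl(regs + REG_STATUS);	-- read, then rmb()
 *	writel(CTRL_ENABLE, regs + REG_CTRL);	-- wmb(), then write
 *	u32 raw = __raw_readl(regs + REG_FIFO);	-- no barrier, no byte swap
 */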
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
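
/*
 * The instantiations above generate read{b,w,l,q}_uncached() and
 * write{b,w,l,q}_uncached(), which bounce through the uncached mapping
 * for the duration of a single access.  As a sketch,
 * __BUILD_UNCACHED_IO(l, u32) expands (modulo whitespace) to:
 *
 *	static inline u32 readl_uncached(unsigned long addr)
 *	{
 *		u32 ret;
 *		jump_to_uncached();
 *		ret = __raw_readl(addr);
 *		back_to_cached();
 *		return ret;
 *	}
 *
 * plus the matching writel_uncached().
 */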

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,	\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
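
/*
 * Usage sketch for the string accessors generated above: each call keeps
 * hitting the same MMIO location, which is the usual pattern for draining
 * or filling a device FIFO.  'regs' and DATA_FIFO are illustrative names,
 * not part of this header:
 *
 *	u16 buf[64];
 *	__raw_readsw(regs + DATA_FIFO, buf, 64);	// FIFO -> buf
 *	__raw_writesw(regs + DATA_FIFO, buf, 64);	// buf -> FIFO
 */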

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}
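
/*
 * Sketch of how board/machvec setup code might establish the port base
 * (the physical address here is made up for illustration).  Once set,
 * port accesses become plain loads and stores in the window based at
 * sh_io_port_base; the exact translation is up to the machvec's
 * __ioport_map():
 *
 *	__set_io_port_base(P2SEGADDR(0x14000000));
 */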

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)		\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
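
/*
 * Illustrative use of the generated port accessors (the port numbers are
 * classic ISA examples, not anything this header defines):
 *
 *	outb(0xab, 0x378);		// single byte to a port
 *	outb_p(0xab, 0x378);		// same, padded with SLOW_DOWN_IO
 *	u16 data[256];
 *	insw(0x1f0, data, 256);		// string read, e.g. an IDE data port
 */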

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif

#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()	wmb()

/* We really want to try to get these to use memcpy() etc. */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
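/*
 * Typical driver-side usage, sketched with made-up names ('pdev', SZ_4K);
 * per the note above, the mapping returned here is uncached:
 *
 *	void __iomem *regs = ioremap(pdev->resource[0].start, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */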
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached accesses to P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#define ioremap_cache ioremap_cache

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define ioremap_uc	ioremap
#define iounmap		__iounmap

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */