/* Generic I/O port emulation, based on MN10300 code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <asm/cacheflush.h>
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#define mmiowb() do {} while (0)

/*****************************************************************************/
/*
 * readX/writeX() are used to access memory-mapped devices. On some
 * architectures memory-mapped I/O needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *) addr;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *) addr;
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *) addr;
}

#define readb __raw_readb
#define readw(addr) __le16_to_cpu(__raw_readw(addr))
#define readl(addr) __le32_to_cpu(__raw_readl(addr))

static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	*(volatile u8 __force *) addr = b;
}

static inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	*(volatile u16 __force *) addr = b;
}

static inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	*(volatile u32 __force *) addr = b;
}

#define writeb __raw_writeb
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)

#ifdef CONFIG_64BIT
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *) addr;
}
#define readq(addr) __le64_to_cpu(__raw_readq(addr))

static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	*(volatile u64 __force *) addr = b;
}
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
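
/*
 * Example (not part of the original header): a minimal sketch of how a
 * driver would use these accessors on an MMIO register block.  "regs" is
 * assumed to be an ioremap()ed base and FOO_CTRL/FOO_CTRL_ENABLE are
 * hypothetical register definitions; readl()/writel() convert between the
 * device's little-endian layout and CPU byte order, while the __raw_*
 * forms do not.
 *
 *	u32 ctrl;
 *
 *	ctrl = readl(regs + FOO_CTRL);
 *	writel(ctrl | FOO_CTRL_ENABLE, regs + FOO_CTRL);
 */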

/*****************************************************************************/
/*
 * traditional input/output functions
 */

static inline u8 inb(unsigned long addr)
{
	return readb((volatile void __iomem *) addr);
}

static inline u16 inw(unsigned long addr)
{
	return readw((volatile void __iomem *) addr);
}

static inline u32 inl(unsigned long addr)
{
	return readl((volatile void __iomem *) addr);
}

static inline void outb(u8 b, unsigned long addr)
{
	writeb(b, (volatile void __iomem *) addr);
}

static inline void outw(u16 b, unsigned long addr)
{
	writew(b, (volatile void __iomem *) addr);
}

static inline void outl(u32 b, unsigned long addr)
{
	writel(b, (volatile void __iomem *) addr);
}

#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
#define inl_p(addr) inl(addr)
#define outb_p(x, addr) outb((x), (addr))
#define outw_p(x, addr) outw((x), (addr))
#define outl_p(x, addr) outl((x), (addr))
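
/*
 * Example (not part of the original header): a minimal sketch of
 * byte-wide port I/O with outb()/inb().  The legacy PC CMOS/RTC ports
 * (index 0x70, data 0x71) are used purely for illustration; on this
 * generic implementation port numbers are simply memory addresses.
 *
 *	u8 seconds;
 *
 *	outb(0x00, 0x70);
 *	seconds = inb(0x71);
 */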

static inline void insb(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u8 *buf = buffer;
		do {
			u8 x = inb(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insw(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u16 *buf = buffer;
		do {
			u16 x = inw(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void insl(unsigned long addr, void *buffer, int count)
{
	if (count) {
		u32 *buf = buffer;
		do {
			u32 x = inl(addr);
			*buf++ = x;
		} while (--count);
	}
}

static inline void outsb(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u8 *buf = buffer;
		do {
			outb(*buf++, addr);
		} while (--count);
	}
}

static inline void outsw(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u16 *buf = buffer;
		do {
			outw(*buf++, addr);
		} while (--count);
	}
}

static inline void outsl(unsigned long addr, const void *buffer, int count)
{
	if (count) {
		const u32 *buf = buffer;
		do {
			outl(*buf++, addr);
		} while (--count);
	}
}
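
/*
 * Example (not part of the original header): a minimal sketch of the
 * string variants, which transfer a whole buffer through a single port.
 * The port number FOO_DATA_PORT and the 256-word sector size are
 * assumptions; the pattern matches draining an IDE-style data FIFO.
 *
 *	u16 sector[256];
 *
 *	insw(FOO_DATA_PORT, sector, 256);
 *	outsw(FOO_DATA_PORT, sector, 256);
 */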

#ifndef CONFIG_GENERIC_IOMAP
#define ioread8(addr) readb(addr)
#define ioread16(addr) readw(addr)
#define ioread32(addr) readl(addr)

#define iowrite8(v, addr) writeb((v), (addr))
#define iowrite16(v, addr) writew((v), (addr))
#define iowrite32(v, addr) writel((v), (addr))

#define ioread8_rep(p, dst, count) \
	insb((unsigned long) (p), (dst), (count))
#define ioread16_rep(p, dst, count) \
	insw((unsigned long) (p), (dst), (count))
#define ioread32_rep(p, dst, count) \
	insl((unsigned long) (p), (dst), (count))

#define iowrite8_rep(p, src, count) \
	outsb((unsigned long) (p), (src), (count))
#define iowrite16_rep(p, src, count) \
	outsw((unsigned long) (p), (src), (count))
#define iowrite32_rep(p, src, count) \
	outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
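
/*
 * Example (not part of the original header): a minimal sketch of the
 * cookie-based accessors, which also work on mappings returned by
 * pci_iomap() whether the BAR is memory- or port-mapped.  "pdev", BAR 0
 * and the 0x10 register offset are assumptions for illustration.
 *
 *	void __iomem *bar0 = pci_iomap(pdev, 0, 0);
 *	u32 v;
 *
 *	v = ioread32(bar0 + 0x10);
 *	iowrite32(v | 1, bar0 + 0x10);
 *	pci_iounmap(pdev, bar0);
 */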

#define IO_SPACE_LIMIT 0xffffffff

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *) (x))

#ifndef CONFIG_GENERIC_IOMAP
/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
}
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial: they only apply to the kernel's direct
 * (lowmem) mapping, where __pa()/__va() do all the work.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}

static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
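
/*
 * Example (not part of the original header): a minimal round-trip sketch.
 * The buffer comes from kmalloc(), i.e. the kernel's direct mapping,
 * which is the only place these helpers are valid (not vmalloc or
 * ioremap addresses).
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(phys) != buf);
 *	kfree(buf);
 */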

/*
 * Map a physical I/O range into kernel virtual address space.  With
 * everything memory-mapped 1:1 here, this is a plain cast and no real
 * unmapping is required.
 */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return (void __iomem *) (unsigned long)offset;
}

#define __ioremap(offset, size, flags) ioremap(offset, size)

#ifndef ioremap_nocache
#define ioremap_nocache ioremap
#endif

#ifndef ioremap_wc
#define ioremap_wc ioremap_nocache
#endif

static inline void iounmap(void __iomem *addr)
{
}
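
/*
 * Example (not part of the original header): a minimal probe-time sketch
 * of the ioremap()/iounmap() pairing.  The resource "res" and the
 * driver-private "priv->regs" pointer are hypothetical; here the mapping
 * is only a cast, but portable drivers must still balance the calls.
 *
 *	priv->regs = ioremap(res->start, resource_size(res));
 *	if (!priv->regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(priv->regs);
 */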

#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *) port;
}

static inline void ioport_unmap(void __iomem *p)
{
}
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
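
/*
 * Example (not part of the original header): a minimal sketch of mapping
 * a legacy port range so it can be driven through the ioread/iowrite
 * cookies.  The 0x3f8/8 range (a PC UART) and the line-status offset 5
 * are assumptions for illustration only.
 *
 *	void __iomem *uart = ioport_map(0x3f8, 8);
 *	u8 lsr = ioread8(uart + 5);
 *	ioport_unmap(uart);
 */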

#define xlate_dev_kmem_ptr(p) p
#define xlate_dev_mem_ptr(p) ((void *) (p))

#ifndef virt_to_bus
static inline unsigned long virt_to_bus(volatile void *address)
{
	return ((unsigned long) address);
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *) address;
}
#endif

#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
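
/*
 * Example (not part of the original header): a minimal sketch of the bulk
 * MMIO helpers.  "fifo" is assumed to be an ioremap()ed region and "buf"
 * an ordinary kernel buffer; on this generic implementation they reduce
 * to plain memset()/memcpy().
 *
 *	memset_io(fifo, 0, 256);
 *	memcpy_toio(fifo, buf, 256);
 *	memcpy_fromio(buf, fifo, 256);
 */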

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */