/*
 * arch/arm/mach-ixp4xx/include/mach/io.h
 *
 * Author: Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H

#include <linux/bitops.h>

#include <mach/hardware.h>

#define IO_SPACE_LIMIT 0x0000ffff

extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32 *data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);

/*
 * IXP4xx provides two methods of accessing PCI memory space:
 *
 * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
 *    To access PCI via this space, we simply ioremap() the BAR
 *    into the kernel and we can use the standard read[bwl]/write[bwl]
 *    macros. This is the preferred method due to speed, but it
 *    limits the system to just 64MB of PCI memory. This can be
 *    problematic if using video cards and other memory-heavy targets.
 *
 * 2) If > 64MB of memory space is required, the IXP4xx can be
 *    configured to use indirect registers to access PCI (as we do
 *    below for I/O transactions). This allows for up to 128MB
 *    (0x48000000 to 0x4fffffff) of memory on the bus. The disadvantage
 *    of this is that every PCI access requires three local register
 *    accesses plus a spinlock, but in some cases the performance hit
 *    is acceptable. In addition, you cannot mmap() PCI devices in
 *    this case.
 */
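
/*
 * Illustrative sketch only (not part of this header): a hypothetical
 * driver using method 1 would ioremap() its BAR through the direct
 * window and then use the standard accessors, roughly:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (regs) {
 *		u32 val = readl(regs);
 *		writel(val | 0x1, regs);
 *		iounmap(regs);
 *	}
 *
 * Here "pdev" is an assumed struct pci_dev from the driver's probe
 * routine, and the register offsets/values are made up for the example.
 * With CONFIG_IXP4XX_INDIRECT_PCI enabled, the very same readl()/writel()
 * calls are routed through the indirect accessors defined below.
 */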
#ifndef CONFIG_IXP4XX_INDIRECT_PCI

#define __mem_pci(a) (a)

#else

/*
 * In the case of using indirect PCI, we simply return the actual PCI
 * address and our read/write implementation uses that to drive the
 * access registers. If something outside of PCI is ioremap'd, we
 * fall back to the default.
 */
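/*
 * For example, ioremap() of a PCI BAR at 0x48000000 simply returns
 * (void __iomem *)0x48000000 and the accessors below treat it as a
 * bus address, whereas any address below PCIBIOS_MIN_MEM or above
 * 0x4fffffff is passed on to __arm_ioremap() and behaves as a normal
 * MMIO mapping.
 */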
static inline void __iomem *
__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned int mtype)
{
        if ((addr < PCIBIOS_MIN_MEM) || (addr > 0x4fffffff))
                return __arm_ioremap(addr, size, mtype);

        return (void __iomem *)addr;
}

static inline void
__ixp4xx_iounmap(void __iomem *addr)
{
        if ((__force u32)addr >= VMALLOC_START)
                __iounmap(addr);
}

#define __arch_ioremap(a, s, f) __ixp4xx_ioremap(a, s, f)
#define __arch_iounmap(a) __ixp4xx_iounmap(a)

#define writeb(v, p) __ixp4xx_writeb(v, p)
#define writew(v, p) __ixp4xx_writew(v, p)
#define writel(v, p) __ixp4xx_writel(v, p)

#define writesb(p, v, l) __ixp4xx_writesb(p, v, l)
#define writesw(p, v, l) __ixp4xx_writesw(p, v, l)
#define writesl(p, v, l) __ixp4xx_writesl(p, v, l)

#define readb(p) __ixp4xx_readb(p)
#define readw(p) __ixp4xx_readw(p)
#define readl(p) __ixp4xx_readl(p)

#define readsb(p, v, l) __ixp4xx_readsb(p, v, l)
#define readsw(p, v, l) __ixp4xx_readsw(p, v, l)
#define readsl(p, v, l) __ixp4xx_readsl(p, v, l)

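/*
 * The indirect accessors below build the PCI byte-enable field by hand.
 * Worked example: an 8-bit write to bus address 0x48000002 gives n = 2,
 * so BIT(n) = 0x4 and (0xf & ~BIT(n)) = 0xb; the byte enables are
 * active low, so only lane 2 is driven, and the data value is shifted
 * left by 8*n = 16 bits to sit on that lane.
 */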
static inline void
__ixp4xx_writeb(u8 value, volatile void __iomem *p)
{
        u32 addr = (u32)p;
        u32 n, byte_enables, data;

        if (addr >= VMALLOC_START) {
                __raw_writeb(value, p);
                return;
        }

        n = addr % 4;
        byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
        data = value << (8*n);
        ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}

static inline void
__ixp4xx_writesb(volatile void __iomem *bus_addr, const u8 *vaddr, int count)
{
        while (count--)
                writeb(*vaddr++, bus_addr);
}

static inline void
__ixp4xx_writew(u16 value, volatile void __iomem *p)
{
        u32 addr = (u32)p;
        u32 n, byte_enables, data;

        if (addr >= VMALLOC_START) {
                __raw_writew(value, p);
                return;
        }

        n = addr % 4;
        byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
        data = value << (8*n);
        ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
}

static inline void
__ixp4xx_writesw(volatile void __iomem *bus_addr, const u16 *vaddr, int count)
{
        while (count--)
                writew(*vaddr++, bus_addr);
}

static inline void
__ixp4xx_writel(u32 value, volatile void __iomem *p)
{
        u32 addr = (__force u32)p;

        if (addr >= VMALLOC_START) {
                __raw_writel(value, p);
                return;
        }

        ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
}

static inline void
__ixp4xx_writesl(volatile void __iomem *bus_addr, const u32 *vaddr, int count)
{
        while (count--)
                writel(*vaddr++, bus_addr);
}

static inline unsigned char
__ixp4xx_readb(const volatile void __iomem *p)
{
        u32 addr = (u32)p;
        u32 n, byte_enables, data;

        if (addr >= VMALLOC_START)
                return __raw_readb(p);

        n = addr % 4;
        byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
        if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
                return 0xff;

        return data >> (8*n);
}

static inline void
__ixp4xx_readsb(const volatile void __iomem *bus_addr, u8 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = readb(bus_addr);
}

static inline unsigned short
__ixp4xx_readw(const volatile void __iomem *p)
{
        u32 addr = (u32)p;
        u32 n, byte_enables, data;

        if (addr >= VMALLOC_START)
                return __raw_readw(p);

        n = addr % 4;
        byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
        if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
                return 0xffff;

        return data >> (8*n);
}

static inline void
__ixp4xx_readsw(const volatile void __iomem *bus_addr, u16 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = readw(bus_addr);
}

static inline unsigned long
__ixp4xx_readl(const volatile void __iomem *p)
{
        u32 addr = (__force u32)p;
        u32 data;

        if (addr >= VMALLOC_START)
                return __raw_readl(p);

        if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
                return 0xffffffff;

        return data;
}

static inline void
__ixp4xx_readsl(const volatile void __iomem *bus_addr, u32 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = readl(bus_addr);
}

/*
 * We can use the built-in functions because they end up calling
 * writeb/readb.
 */
#define memset_io(c,v,l) _memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l))

#endif

#ifndef CONFIG_PCI

#define __io(v) __typesafe_io(v)

#else

/*
 * IXP4xx does not have a transparent cpu -> PCI I/O translation
 * window. Instead, it has a set of registers that must be tweaked
 * with the proper byte lanes, command types, and address for the
 * transaction. This means that we need to override the default
 * I/O functions.
 */
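
/*
 * Illustrative sketch only: a legacy-style driver doing
 *
 *	outb(0x12, 0x300);
 *	val = inb(0x300);
 *
 * ends up in __ixp4xx_outb()/__ixp4xx_inb() below, where each access
 * becomes an indirect NP_CMD_IOWRITE/NP_CMD_IOREAD transaction with
 * the appropriate byte enables (0x12 and port 0x300 are arbitrary
 * example values within IO_SPACE_LIMIT).
 */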
#define outb(v, p) __ixp4xx_outb(v, p)
#define outw(v, p) __ixp4xx_outw(v, p)
#define outl(v, p) __ixp4xx_outl(v, p)

#define outsb(p, v, l) __ixp4xx_outsb(p, v, l)
#define outsw(p, v, l) __ixp4xx_outsw(p, v, l)
#define outsl(p, v, l) __ixp4xx_outsl(p, v, l)

#define inb(p) __ixp4xx_inb(p)
#define inw(p) __ixp4xx_inw(p)
#define inl(p) __ixp4xx_inl(p)

#define insb(p, v, l) __ixp4xx_insb(p, v, l)
#define insw(p, v, l) __ixp4xx_insw(p, v, l)
#define insl(p, v, l) __ixp4xx_insl(p, v, l)

static inline void
__ixp4xx_outb(u8 value, u32 addr)
{
        u32 n, byte_enables, data;
        n = addr % 4;
        byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
        data = value << (8*n);
        ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}

static inline void
__ixp4xx_outsb(u32 io_addr, const u8 *vaddr, u32 count)
{
        while (count--)
                outb(*vaddr++, io_addr);
}

static inline void
__ixp4xx_outw(u16 value, u32 addr)
{
        u32 n, byte_enables, data;
        n = addr % 4;
        byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
        data = value << (8*n);
        ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
}

static inline void
__ixp4xx_outsw(u32 io_addr, const u16 *vaddr, u32 count)
{
        while (count--)
                outw(cpu_to_le16(*vaddr++), io_addr);
}

static inline void
__ixp4xx_outl(u32 value, u32 addr)
{
        ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
}

static inline void
__ixp4xx_outsl(u32 io_addr, const u32 *vaddr, u32 count)
{
        while (count--)
                outl(cpu_to_le32(*vaddr++), io_addr);
}

static inline u8
__ixp4xx_inb(u32 addr)
{
        u32 n, byte_enables, data;
        n = addr % 4;
        byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
        if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
                return 0xff;

        return data >> (8*n);
}

static inline void
__ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = inb(io_addr);
}

static inline u16
__ixp4xx_inw(u32 addr)
{
        u32 n, byte_enables, data;
        n = addr % 4;
        byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
        if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
                return 0xffff;

        return data >> (8*n);
}

static inline void
__ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = le16_to_cpu(inw(io_addr));
}

static inline u32
__ixp4xx_inl(u32 addr)
{
        u32 data;
        if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
                return 0xffffffff;

        return data;
}

static inline void
__ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
{
        while (count--)
                *vaddr++ = le32_to_cpu(inl(io_addr));
}

#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL

#define __is_io_address(p) (((unsigned long)p >= PIO_OFFSET) && \
                            ((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))

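/*
 * ioport_map() (defined at the end of this file) hands out cookies
 * rather than real mappings: the port number is simply offset by
 * PIO_OFFSET.  For example, ioport_map(0x300, 1) returns
 * (void __iomem *)0x10300; __is_io_address() then recognises such
 * cookies in the ioread*()/iowrite*() helpers below, and the port is
 * recovered with "& PIO_MASK" before the indirect I/O transaction is
 * issued.
 */
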
static inline unsigned int
__ixp4xx_ioread8(const void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                return (unsigned int)__ixp4xx_inb(port & PIO_MASK);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                return (unsigned int)__raw_readb(addr);
#else
                return (unsigned int)__ixp4xx_readb(addr);
#endif
}

static inline void
__ixp4xx_ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_insb(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_readsb(addr, vaddr, count);
#else
                __ixp4xx_readsb(addr, vaddr, count);
#endif
}

static inline unsigned int
__ixp4xx_ioread16(const void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                return (unsigned int)__ixp4xx_inw(port & PIO_MASK);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                return le16_to_cpu((__force __le16)__raw_readw(addr));
#else
                return (unsigned int)__ixp4xx_readw(addr);
#endif
}

static inline void
__ixp4xx_ioread16_rep(const void __iomem *addr, void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_insw(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_readsw(addr, vaddr, count);
#else
                __ixp4xx_readsw(addr, vaddr, count);
#endif
}

static inline unsigned int
__ixp4xx_ioread32(const void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                return (unsigned int)__ixp4xx_inl(port & PIO_MASK);
        else {
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                return le32_to_cpu((__force __le32)__raw_readl(addr));
#else
                return (unsigned int)__ixp4xx_readl(addr);
#endif
        }
}

static inline void
__ixp4xx_ioread32_rep(const void __iomem *addr, void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_insl(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_readsl(addr, vaddr, count);
#else
                __ixp4xx_readsl(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite8(u8 value, void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outb(value, port & PIO_MASK);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writeb(value, addr);
#else
                __ixp4xx_writeb(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite8_rep(void __iomem *addr, const void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outsb(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writesb(addr, vaddr, count);
#else
                __ixp4xx_writesb(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite16(u16 value, void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outw(value, port & PIO_MASK);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writew((u16 __force)cpu_to_le16(value), addr);
#else
                __ixp4xx_writew(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite16_rep(void __iomem *addr, const void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outsw(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writesw(addr, vaddr, count);
#else
                __ixp4xx_writesw(addr, vaddr, count);
#endif
}

static inline void
__ixp4xx_iowrite32(u32 value, void __iomem *addr)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outl(value, port & PIO_MASK);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writel((u32 __force)cpu_to_le32(value), addr);
#else
                __ixp4xx_writel(value, addr);
#endif
}

static inline void
__ixp4xx_iowrite32_rep(void __iomem *addr, const void *vaddr, u32 count)
{
        unsigned long port = (unsigned long __force)addr;
        if (__is_io_address(port))
                __ixp4xx_outsl(port & PIO_MASK, vaddr, count);
        else
#ifndef CONFIG_IXP4XX_INDIRECT_PCI
                __raw_writesl(addr, vaddr, count);
#else
                __ixp4xx_writesl(addr, vaddr, count);
#endif
}

#define ioread8(p) __ixp4xx_ioread8(p)
#define ioread16(p) __ixp4xx_ioread16(p)
#define ioread32(p) __ixp4xx_ioread32(p)

#define ioread8_rep(p, v, c) __ixp4xx_ioread8_rep(p, v, c)
#define ioread16_rep(p, v, c) __ixp4xx_ioread16_rep(p, v, c)
#define ioread32_rep(p, v, c) __ixp4xx_ioread32_rep(p, v, c)

#define iowrite8(v, p) __ixp4xx_iowrite8(v, p)
#define iowrite16(v, p) __ixp4xx_iowrite16(v, p)
#define iowrite32(v, p) __ixp4xx_iowrite32(v, p)

#define iowrite8_rep(p, v, c) __ixp4xx_iowrite8_rep(p, v, c)
#define iowrite16_rep(p, v, c) __ixp4xx_iowrite16_rep(p, v, c)
#define iowrite32_rep(p, v, c) __ixp4xx_iowrite32_rep(p, v, c)

#define ioport_map(port, nr) ((void __iomem *)((port) + PIO_OFFSET))
#define ioport_unmap(addr)

#endif /* !CONFIG_PCI */

#endif /* __ASM_ARM_ARCH_IO_H */