/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <linux/module.h>

/*
 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
 * access or an MMIO access, these functions don't care. The info is
 * encoded in the hardware mapping set up by the mapping functions
 * (or the cookie itself, depending on implementation and hw).
 *
 * The generic routines don't assume any hardware mappings, and just
 * encode the PIO/MMIO as part of the cookie. They coldly assume that
 * the MMIO IO mappings are not in the low address range.
 *
 * Architectures for which this is not true can't use this generic
 * implementation and should do their own copy.
 */

#ifndef HAVE_ARCH_PIO_SIZE
/*
 * We encode the physical PIO addresses (0-0xffff) into the
 * pointer by offsetting them with a constant (0x10000) and
 * assuming that all the low addresses are always PIO. That means
 * we can do some sanity checks on the low bits, and don't
 * need to just take things for granted.
 */
#define PIO_OFFSET	0x10000UL
#define PIO_MASK	0x0ffffUL
#define PIO_RESERVED	0x40000UL
#endif
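
/*
 * Illustrative sketch (not part of the build; the UART base below is just an
 * example): with the generic encoding above, a legacy port number and its
 * cookie differ only by a constant offset, so the round trip is plain
 * arithmetic.
 *
 *	unsigned long port   = 0x3f8;				// e.g. a 16550 UART base
 *	void __iomem *cookie = (void __iomem *)(port + PIO_OFFSET);	// what ioport_map() returns
 *	unsigned long back   = (unsigned long)cookie & PIO_MASK;	// == 0x3f8 again
 *
 * Anything below PIO_RESERVED (0x40000) is treated as a PIO cookie; real MMIO
 * mappings are assumed to live well above that range.
 */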

/*
 * Ugly macros are a way of life.
 */
#define VERIFY_PIO(port) BUG_ON((port & ~PIO_MASK) != PIO_OFFSET)

#define IO_COND(addr, is_pio, is_mmio) do {			\
	unsigned long port = (unsigned long __force)addr;	\
	if (port < PIO_RESERVED) {				\
		VERIFY_PIO(port);				\
		port &= PIO_MASK;				\
		is_pio;						\
	} else {						\
		is_mmio;					\
	}							\
} while (0)
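
/*
 * Illustrative sketch (cookie values assume the generic encoding above, and
 * the addresses are invented): IO_COND() picks one of its two statement
 * arguments purely from the numeric value of the cookie.
 *
 *	void __iomem *io  = ioport_map(0x1f0, 8);	// 0x101f0 < PIO_RESERVED -> PIO branch
 *	void __iomem *mem = ioremap(0xfebf0000, 0x1000);// high kernel address  -> MMIO branch
 *
 *	ioread8(io);	// ends up in inb(0x1f0)
 *	ioread8(mem);	// ends up in readb(mem)
 */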

#ifndef pio_read16be
#define pio_read16be(port) swab16(inw(port))
#define pio_read32be(port) swab32(inl(port))
#endif

#ifndef mmio_read16be
#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
#endif

unsigned int fastcall ioread8(void __iomem *addr)
{
	IO_COND(addr, return inb(port), return readb(addr));
}
unsigned int fastcall ioread16(void __iomem *addr)
{
	IO_COND(addr, return inw(port), return readw(addr));
}
unsigned int fastcall ioread16be(void __iomem *addr)
{
	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
}
unsigned int fastcall ioread32(void __iomem *addr)
{
	IO_COND(addr, return inl(port), return readl(addr));
}
unsigned int fastcall ioread32be(void __iomem *addr)
{
	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
}
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
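
/*
 * Illustrative sketch (the BAR number and register offsets are invented): a
 * driver reads through the cookie without knowing whether the BAR decoded as
 * port I/O or memory space.
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	u32 status;
 *	u16 chip_id;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status  = ioread32(regs + 0x10);	// hypothetical STATUS register
 *	chip_id = ioread16be(regs + 0x14);	// big-endian register, swapped as needed
 */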

#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif

#ifndef mmio_write16be
#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
#endif

void fastcall iowrite8(u8 val, void __iomem *addr)
{
	IO_COND(addr, outb(val,port), writeb(val, addr));
}
void fastcall iowrite16(u16 val, void __iomem *addr)
{
	IO_COND(addr, outw(val,port), writew(val, addr));
}
void fastcall iowrite16be(u16 val, void __iomem *addr)
{
	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void fastcall iowrite32(u32 val, void __iomem *addr)
{
	IO_COND(addr, outl(val,port), writel(val, addr));
}
void fastcall iowrite32be(u32 val, void __iomem *addr)
{
	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
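
/*
 * Illustrative sketch (all names and offsets are invented): the *be variants
 * keep a driver for a big-endian device byte-order agnostic.
 *
 *	iowrite32(ctrl_bits, regs + 0x00);	// native-endian register
 *	iowrite32be(ring_base, regs + 0x08);	// device wants big-endian
 *	iowrite16be(0xcafe, regs + 0x0c);
 */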

/*
 * These are the "repeat MMIO read/write" functions.
 * Note the "__raw" accesses, since we don't want to
 * convert to CPU byte order. We write in "IO byte
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
{
	while (--count >= 0) {
		u8 data = __raw_readb(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
{
	while (--count >= 0) {
		u16 data = __raw_readw(addr);
		*dst = data;
		dst++;
	}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
{
	while (--count >= 0) {
		u32 data = __raw_readl(addr);
		*dst = data;
		dst++;
	}
}
#endif

#ifndef mmio_outsb
static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
{
	while (--count >= 0) {
		__raw_writeb(*src, addr);
		src++;
	}
}
static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
{
	while (--count >= 0) {
		__raw_writew(*src, addr);
		src++;
	}
}
static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
{
	while (--count >= 0) {
		__raw_writel(*src, addr);
		src++;
	}
}
#endif

void fastcall ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void fastcall ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void fastcall ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
{
	IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(ioread32_rep);

void fastcall iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void fastcall iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void fastcall iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
	IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
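
/*
 * Illustrative sketch (DATA_FIFO and the buffer are invented): the _rep
 * helpers hit the same cookie location repeatedly, which is exactly how a
 * data-port FIFO is drained or filled.
 *
 *	u16 buf[SECTOR_SIZE / 2];
 *
 *	ioread16_rep(regs + DATA_FIFO, buf, ARRAY_SIZE(buf));	// pull one sector
 *	iowrite16_rep(regs + DATA_FIFO, buf, ARRAY_SIZE(buf));	// push one sector
 */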

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	if (port > PIO_MASK)
		return NULL;
	return (void __iomem *) (unsigned long) (port + PIO_OFFSET);
}

void ioport_unmap(void __iomem *addr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
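
/*
 * Illustrative sketch (the legacy ATA ports are only an example): mapping a
 * port range just builds an offset cookie, which then feeds the ioreadN()
 * and iowriteN() helpers above.
 *
 *	void __iomem *cmd = ioport_map(0x1f0, 8);
 *
 *	if (!cmd)
 *		return -ENODEV;
 *	iowrite8(0xec, cmd + 7);	// IDENTIFY DEVICE to the command register
 *	ioport_unmap(cmd);		// a no-op here, kept for symmetry
 */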

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	unsigned long start = pci_resource_start(dev, bar);
	unsigned long len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (!len || !start)
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;
	if (flags & IORESOURCE_IO)
		return ioport_map(start, len);
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);
		return ioremap_nocache(start, len);
	}
	/* What? */
	return NULL;
}

void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
	IO_COND(addr, /* nothing */, iounmap(addr));
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
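
/*
 * Illustrative sketch (the BAR number and CTRL_REG are invented): callers
 * never need to know whether the BAR is I/O- or memory-mapped.
 *
 *	void __iomem *base = pci_iomap(pdev, 1, 0);	// maxlen 0 = map the whole BAR
 *
 *	if (!base)
 *		return -ENOMEM;
 *	iowrite32(0, base + CTRL_REG);
 *	...
 *	pci_iounmap(pdev, base);
 */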

#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
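
/*
 * Illustrative sketch (port base and count are invented): in a probe routine
 * the managed variant removes the explicit unmap from every error path.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		void __iomem *io = devm_ioport_map(dev, 0x300, 16);
 *
 *		if (!io)
 *			return -ENOMEM;
 *		// talk to the device with ioread8(io + ...), iowrite8(..., io + ...)
 *		return 0;	// unmapped automatically on driver detach
 *	}
 */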

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);

static void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
			   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap);
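
/*
 * Illustrative sketch (the physical address and size are placeholders): the
 * devres record keeps the cookie so it is torn down, in reverse order, when
 * the driver detaches.
 *
 *	void __iomem *regs = devm_ioremap(&pdev->dev, 0xfebf0000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	// no devm_iounmap() needed unless the mapping must go away early
 */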

/**
 * devm_ioremap_nocache - Managed ioremap_nocache()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_nocache(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
				   unsigned long size)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioremap_nocache(offset, size);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioremap_nocache);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	iounmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (void *)addr));
}
EXPORT_SYMBOL(devm_iounmap);

/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev. If the iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
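
/*
 * Illustrative sketch (BAR index and offset are invented): once the table
 * exists, later code fetches cookies by BAR number instead of carrying
 * pointers around.
 *
 *	void __iomem * const *iomap = pcim_iomap_table(pdev);
 *
 *	if (!iomap || !iomap[2])
 *		return -ENOMEM;
 *	iowrite32(0, iomap[2] + 0x40);
 */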

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver
 * detach.
 */
void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_region;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_iomap;
	}

	return 0;

 err_iomap:
	pcim_iounmap(pdev, iomap[i]);
 err_region:
	pci_release_region(pdev, i);
 err_inval:
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;	/* this BAR was never requested or mapped */
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
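
/*
 * Illustrative sketch (driver name, BAR mask and the pcim_enable_device()
 * pairing are only an example): a fully managed probe maps everything up
 * front and leaves all cleanup to devres.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *mmio;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "foo");
 *		if (rc)
 *			return rc;
 *
 *		mmio = pcim_iomap_table(pdev)[0];
 *		// program the device through mmio ...
 *		return 0;	// regions and maps are released on detach
 *	}
 */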