/*
 * Copyright 2006 PathScale, Inc.  All Rights Reserved.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef _LINUX_IO_H
#define _LINUX_IO_H

#include <linux/types.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>

struct device;
struct resource;

__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
void __ioread32_copy(void *to, const void __iomem *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);

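/*
 * Example (illustrative sketch, not part of this header): pushing a buffer
 * into a device FIFO with __iowrite32_copy().  Note that the count argument
 * is in 32-bit words, not bytes.  `regs` and FIFO_OFFSET are hypothetical
 * names for an ioremap()ed register block and a device-specific offset.
 *
 *	u32 cmd[16];
 *
 *	__iowrite32_copy(regs + FIFO_OFFSET, cmd, ARRAY_SIZE(cmd));
 */
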
#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
		       phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
				     phys_addr_t phys_addr, pgprot_t prot)
{
	return 0;
}
#endif

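/*
 * ioremap_page_range() wires a physical range into an already reserved
 * kernel virtual range; it is normally called from an architecture's
 * ioremap() implementation rather than from drivers.  A minimal sketch,
 * assuming get_vm_area()/free_vm_area() from <linux/vmalloc.h> and a
 * caller-supplied pgprot_t `prot`:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	unsigned long vaddr;
 *
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */
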
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
void __init ioremap_huge_init(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);
#else
static inline void ioremap_huge_init(void) { }
#endif

/*
 * Managed iomap interface
 */
#ifdef CONFIG_HAS_IOPORT_MAP
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr);
void devm_ioport_unmap(struct device *dev, void __iomem *addr);
#else
static inline void __iomem *devm_ioport_map(struct device *dev,
					    unsigned long port,
					    unsigned int nr)
{
	return NULL;
}

static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
}
#endif

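/*
 * Example (illustrative sketch, not part of this header): a driver probe
 * routine mapping a legacy I/O port range and accessing it through the
 * ioread/iowrite accessors.  MY_BASE_PORT and MY_PORT_COUNT are made-up
 * names for the device's port base and range length.
 *
 *	void __iomem *base;
 *	u8 status;
 *
 *	base = devm_ioport_map(dev, MY_BASE_PORT, MY_PORT_COUNT);
 *	if (!base)
 *		return -ENOMEM;
 *	iowrite8(0x01, base);		// hypothetical enable bit
 *	status = ioread8(base + 1);
 *
 * The mapping is released automatically on driver detach; an explicit
 * devm_ioport_unmap() is only needed for early teardown.
 */
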
#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)

void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size);
void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
				   resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
		    const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);

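/*
 * Example (illustrative sketch, not part of this header): the typical
 * managed-ioremap pattern in a platform driver probe.  CTRL_REG is a
 * hypothetical register offset; error handling is abbreviated.
 *
 *	struct resource *res;
 *	void __iomem *regs;
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *
 * No iounmap() is needed; the mapping is torn down when the device is
 * unbound.  Most drivers use devm_ioremap_resource(), which also requests
 * the region and validates `res`.
 */
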
void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);

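/*
 * Example (illustrative sketch, not part of this header): mapping a
 * reserved, RAM-like region as ordinary write-back memory with automatic
 * cleanup on driver detach.  `res` is assumed to describe such a region;
 * the devm_ variant reports failure via an ERR_PTR() value.
 *
 *	void *addr;
 *
 *	addr = devm_memremap(dev, res->start, resource_size(res),
 *			     MEMREMAP_WB);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	memcpy(buf, addr, len);		// plain memory semantics, no __iomem
 */
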
void *__devm_memremap_pages(struct device *dev, struct resource *res);

/*
 * Some systems do not have legacy ISA devices, so /dev/port is not a
 * valid interface on them.  Such architectures should define the
 * following symbol in <asm/io.h> to evaluate to 0; the default below
 * assumes /dev/port is available.
 */
#ifndef arch_has_dev_port
#define arch_has_dev_port()	(1)
#endif

/*
 * Some systems (x86 without PAT) have a somewhat reliable way to mark a
 * physical address range such that uncached mappings will actually
 * end up write-combining.  This facility should be used in conjunction
 * with pgprot_writecombine, ioremap_wc, or set_memory_wc, since it has
 * no effect if the per-page mechanisms are functional.
 * (On x86 without PAT, these functions manipulate MTRRs.)
 *
 * arch_phys_wc_del(0) or arch_phys_wc_del(any error code) is guaranteed
 * to have no effect.
 */
#ifndef arch_phys_wc_add
static inline int __must_check arch_phys_wc_add(unsigned long base,
						unsigned long size)
{
	return 0;  /* It worked (i.e. did nothing). */
}

static inline void arch_phys_wc_del(int handle)
{
}

#define arch_phys_wc_add arch_phys_wc_add
#ifndef arch_phys_wc_index
static inline int arch_phys_wc_index(int handle)
{
	return -1;
}
#define arch_phys_wc_index arch_phys_wc_index
#endif
#endif

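/*
 * Example (illustrative sketch, not part of this header): the usual
 * pattern in framebuffer drivers, keeping the returned cookie so it can
 * be handed back to arch_phys_wc_del() at teardown.  `par` stands in for
 * the driver's private state; negative cookies need no special handling,
 * as noted above.
 *
 *	par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
 *					  info->fix.smem_len);
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *	...
 *	iounmap(info->screen_base);
 *	arch_phys_wc_del(par->wc_cookie);
 */
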
enum {
	/* See memremap() kernel-doc for usage description... */
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};

void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);

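/*
 * Example (illustrative sketch, not part of this header): memremap()
 * returns a regular (non-__iomem) pointer, so it suits ranges backed by
 * RAM-like memory rather than device registers.  OR'ing flags expresses
 * an order of preference; WB-then-WT is just one possible combination.
 *
 *	void *nvram;
 *
 *	nvram = memremap(phys_base, size, MEMREMAP_WB | MEMREMAP_WT);
 *	if (!nvram)
 *		return -ENOMEM;
 *	memcpy(shadow, nvram, size);
 *	memunmap(nvram);
 */
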
/*
 * On x86 PAT systems the kernel tracks the allowed memory types for
 * physical memory ranges.  This tracking covers all the in-kernel
 * mapping APIs (ioremap*), but it is not updated when a driver maps a
 * range from a physical device into user memory.  This API is for
 * drivers that remap physical device pages into userspace and want to
 * make sure they are mapped WC and not UC.
 */
#ifndef arch_io_reserve_memtype_wc
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
					     resource_size_t size)
{
	return 0;
}

static inline void arch_io_free_memtype_wc(resource_size_t base,
					   resource_size_t size)
{
}
#endif

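/*
 * Example (illustrative sketch, not part of this header): a GPU driver
 * reserving WC for its VRAM aperture before those pages get mapped into
 * userspace, and releasing the reservation on teardown.  `aper_base` and
 * `aper_size` are hypothetical fields describing the aperture.
 *
 *	ret = arch_io_reserve_memtype_wc(adev->aper_base, adev->aper_size);
 *	if (ret)
 *		return ret;
 *	adev->wc_cookie = arch_phys_wc_add(adev->aper_base, adev->aper_size);
 *	...
 *	arch_phys_wc_del(adev->wc_cookie);
 *	arch_io_free_memtype_wc(adev->aper_base, adev->aper_size);
 */
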
#endif /* _LINUX_IO_H */