/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 * Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#define ARCH_HAS_IOREMAP_WC
#define ARCH_HAS_IOREMAP_WT

#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable_types.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
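
/*
 * For illustration only: the readb() instance generated above expands to
 * roughly the following (a sketch of the resulting inline, not extra code):
 *
 *   static inline unsigned char readb(const volatile void __iomem *addr)
 *   {
 *           unsigned char ret;
 *
 *           asm volatile("movb %1,%0"
 *                        : "=q" (ret)
 *                        : "m" (*(volatile unsigned char __force *)addr)
 *                        : "memory");
 *           return ret;
 *   }
 */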

#define readb readb
#define readw readw
#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb writeb
#define writew writew
#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#define mmiowb() barrier()

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", u64, "=r", :"memory")
build_mmio_read(__readq, "q", u64, "=r", )
build_mmio_write(writeq, "q", u64, "r", :"memory")
build_mmio_write(__writeq, "q", u64, "r", )

#define readq_relaxed(a) __readq(a)
#define writeq_relaxed(v, a) __writeq(v, a)

#define __raw_readq __readq
#define __raw_writeq __writeq

/* Let people know that we have them */
#define readq readq
#define writeq writeq

#endif

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}
#define virt_to_phys virt_to_phys

/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
#define phys_to_virt phys_to_virt
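
/*
 * Usage sketch (illustrative only; the buffer and its size are made up):
 * these helpers are valid for direct-mapped memory such as kmalloc()
 * allocations, and they round-trip:
 *
 *   void *buf = kmalloc(64, GFP_KERNEL);
 *   phys_addr_t pa = virt_to_phys(buf);     // CPU physical address
 *
 *   WARN_ON(phys_to_virt(pa) != buf);       // maps back to the same pointer
 *   kfree(buf);
 */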

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_page_to_bus(page)	((unsigned int)page_to_phys(page))
#define isa_bus_to_virt		phys_to_virt

/*
 * However, PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * The default ioremap() behavior is non-cached; if you need something
 * else, you probably want one of the following.
 */
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
#define ioremap_nocache ioremap_nocache
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc

extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
#define ioremap_prot ioremap_prot

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap_nocache(offset, size);
}
#define ioremap ioremap

extern void iounmap(volatile void __iomem *addr);
#define iounmap iounmap
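
/*
 * Usage sketch (illustrative only; the BAR address, size and register
 * offsets are made-up values, not anything this header defines): map a
 * device's MMIO window, access it with the mmio helpers above, then
 * tear the mapping down again:
 *
 *   void __iomem *regs = ioremap(0xfed00000, 0x1000);
 *
 *   if (regs) {
 *           writel(0x1, regs + 0x10);          // hypothetical control reg
 *           u32 status = readl(regs + 0x14);   // hypothetical status reg
 *           iounmap(regs);
 *   }
 *
 * For a PCI BAR, prefer pci_iomap() as noted above; for write-combining
 * mappings (e.g. framebuffers) see ioremap_wc() further down.
 */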

extern void set_iounmap_nonlazy(void);

#ifdef __KERNEL__

#include <asm-generic/iomap.h>

/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite strong):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
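
/*
 * Illustration (a sketch, not part of the interface): a legacy driver can
 * poke ISA memory space, e.g. the VGA text buffer at physical 0xb8000,
 * straight through __ISA_IO_base:
 *
 *   void __iomem *vga = __ISA_IO_base + 0xb8000;
 *   u16 cell = readw(vga);                   // character + attribute
 *
 *   writew((cell & 0xff00) | 'X', vga);      // replace the character
 */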

#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
#include <linux/jump_label.h>

extern struct static_key_false sev_enable_key;
static inline bool sev_key_active(void)
{
	return static_branch_unlikely(&sev_enable_key);
}

#else /* !CONFIG_AMD_MEM_ENCRYPT */

static inline bool sev_key_active(void) { return false; }

#endif /* CONFIG_AMD_MEM_ENCRYPT */

#define BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
	asm volatile("out" #bwl " %" #bw "0, %w1" \
		     : : "a"(value), "Nd"(port)); \
} \
 \
static inline unsigned type in##bwl(int port) \
{ \
	unsigned type value; \
	asm volatile("in" #bwl " %w1, %" #bw "0" \
		     : "=a"(value) : "Nd"(port)); \
	return value; \
} \
 \
static inline void out##bwl##_p(unsigned type value, int port) \
{ \
	out##bwl(value, port); \
	slow_down_io(); \
} \
 \
static inline unsigned type in##bwl##_p(int port) \
{ \
	unsigned type value = in##bwl(port); \
	slow_down_io(); \
	return value; \
} \
 \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
	if (sev_key_active()) { \
		unsigned type *value = (unsigned type *)addr; \
		while (count) { \
			out##bwl(*value, port); \
			value++; \
			count--; \
		} \
	} else { \
		asm volatile("rep; outs" #bwl \
			     : "+S"(addr), "+c"(count) \
			     : "d"(port) : "memory"); \
	} \
} \
 \
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
	if (sev_key_active()) { \
		unsigned type *value = (unsigned type *)addr; \
		while (count) { \
			*value = in##bwl(port); \
			value++; \
			count--; \
		} \
	} else { \
		asm volatile("rep; ins" #bwl \
			     : "+D"(addr), "+c"(count) \
			     : "d"(port) : "memory"); \
	} \
}

BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)
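
/*
 * For illustration only: BUILDIO(b, b, char) above generates, among other
 * helpers, roughly the following (a sketch of the expansion, not extra code):
 *
 *   static inline void outb(unsigned char value, int port)
 *   {
 *           asm volatile("outb %b0, %w1" : : "a"(value), "Nd"(port));
 *   }
 *
 *   static inline unsigned char inb(int port)
 *   {
 *           unsigned char value;
 *
 *           asm volatile("inb %w1, %b0" : "=a"(value) : "Nd"(port));
 *           return value;
 *   }
 */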

#define inb inb
#define inw inw
#define inl inl
#define inb_p inb_p
#define inw_p inw_p
#define inl_p inl_p
#define insb insb
#define insw insw
#define insl insl

#define outb outb
#define outw outw
#define outl outl
#define outb_p outb_p
#define outw_p outw_p
#define outl_p outl_p
#define outsb outsb
#define outsw outsw
#define outsl outsl
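
/*
 * Usage sketch (illustrative only; the port numbers are the classic PC
 * CMOS/RTC index/data ports, not something this header defines): select
 * CMOS register 0x0A via port 0x70, then read its value from port 0x71,
 * using the pausing variants as old RTC code traditionally does:
 *
 *   u8 status;
 *
 *   outb_p(0x0A, 0x70);          // write register index
 *   status = inb_p(0x71);        // read register contents
 */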

extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
#define ioremap_wc ioremap_wc
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
#define ioremap_wt ioremap_wt

extern bool is_early_ioremap_ptep(pte_t *ptep);

#define IO_SPACE_LIMIT 0xffff

#include <asm-generic/io.h>
#undef PCI_IOBASE

#ifdef CONFIG_MTRR
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index

extern int __must_check arch_phys_wc_add(unsigned long base,
					 unsigned long size);
extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif

extern bool arch_memremap_can_ram_remap(resource_size_t offset,
					unsigned long size,
					unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

extern bool phys_mem_access_encrypted(unsigned long phys_addr,
				      unsigned long size);

#endif /* _ASM_X86_IO_H */