/*
 * arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * modification for nommu, Hyok S. Choi, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif

/*
 * Allow constants defined here to be used from assembly code
 * by appending the UL suffix only when compiling actual C code.
 */
#define UL(x) _AC(x, UL)
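/*
 * For example, UL(0xffff0000) expands to 0xffff0000UL when this header is
 * included from C code, but to plain 0xffff0000 when it is pre-processed
 * for assembly sources, where the UL suffix would not be understood.
 */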

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
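/*
 * As a worked example (assuming the default configuration, not every
 * platform): with CONFIG_PAGE_OFFSET = 0xC0000000 (3G/1G split), TASK_SIZE
 * is 0xBF000000 and TASK_UNMAPPED_BASE is ALIGN(0x3FAAAAAA, 16M), i.e.
 * 0x40000000.
 */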

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbol relocation (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif
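
/*
 * With the default 0xC0000000 PAGE_OFFSET, for example, the module area is
 * 0xBF000000-0xC0000000 (0xBF800000-0xC0000000 on Thumb-2 kernels); when
 * highmem is enabled the last PMD (typically 2 MiB) of that window is
 * handed to the pkmap area instead.
 */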

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24
#endif

#else /* CONFIG_MMU */

/*
 * On no-MMU systems a user task can grow up to the end of the free RAM
 * region, so it is hard to give this define a value that matches its
 * original meaning. Fortunately, there is no reference to it in noMMU
 * mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * Modules can be placed anywhere in RAM in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)  (physaddr)

#endif /* !CONFIG_MMU */

/*
 * The TCM memories (at most 32 KiB of ITCM and 32 KiB of DTCM) are fixed
 * at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif
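
/*
 * Given the 32 KiB maximum, ITCM therefore spans 0xfffe0000-0xfffe7fff and
 * DTCM spans 0xfffe8000-0xfffeffff, just below the high vectors page at
 * 0xffff0000.
 */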

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifndef __ASSEMBLY__

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(__virt_to_phys)
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)

#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pv_stub(from, to, instr, type)		\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (type))

#define __pv_stub_mov_hi(t)				\
	__asm__ volatile("@ __pv_stub_mov\n"		\
	"1:	mov	%R0, %1\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (t)					\
	: "I" (__PV_BITS_7_0))

#define __pv_add_carry_stub(x, y)			\
	__asm__ volatile("@ __pv_add_carry_stub\n"	\
	"1:	adds	%Q0, %1, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "+r" (y)					\
	: "r" (x), "I" (__PV_BITS_31_24)		\
	: "cc")
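
/*
 * Each stub above records the address of its "1:" instruction in the
 * .pv_table section. The kernel walks that table early during boot (and
 * again for each module, via fixup_pv_table() declared above) and rewrites
 * the 8-bit immediate in every recorded instruction once the real physical
 * offset of RAM is known. The helpers below therefore reduce to a single
 * patched add/sub when phys_addr_t is 32 bits, or a patched mov plus an
 * adds/adc pair when LPAE makes phys_addr_t 64 bits wide.
 */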
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add", __PV_BITS_31_24);
	} else {
		__pv_stub_mov_hi(t);
		__pv_add_carry_stub(x, t);
	}
	return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when phys_addr_t
	 * is 64 bit, and makes sure that the inline assembler expression
	 * receives a 32-bit argument where a 32-bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
	return t;
}

#else
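/*
 * Neither a machine-specific __virt_to_phys nor runtime patching is in use,
 * so the lowmem translation is a fixed, compile-time offset of
 * PHYS_OFFSET - PAGE_OFFSET.
 */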

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
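/*
 * For example, __pa(PAGE_OFFSET) == PHYS_OFFSET and, conversely,
 * pfn_to_kaddr(PHYS_PFN_OFFSET) == (void *)PAGE_OFFSET; addresses outside
 * the direct-mapped lowmem region (vmalloc, highmem, ioremap) must not be
 * passed to these helpers.
 */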

extern unsigned long (*arch_virt_to_idmap)(unsigned long x);

/*
 * These are for systems that have a hardware-interconnect-supported alias
 * of physical memory for idmap purposes.  Most cases should leave these
 * untouched.  Note: this can only return addresses less than 4GiB.
 */
static inline unsigned long __virt_to_idmap(unsigned long x)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	else
		return __virt_to_phys(x);
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
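
/*
 * virt_to_idmap() is typically used by code that is about to turn the MMU
 * off, e.g. the soft-restart and CPU reset paths, which must branch to an
 * identity-mapped (virtual == physical alias) copy of themselves.
 */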

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif

#ifdef CONFIG_VIRT_TO_BUS
#define virt_to_bus virt_to_bus
static inline __deprecated unsigned long virt_to_bus(void *x)
{
	return __virt_to_bus((unsigned long)x);
}

#define bus_to_virt bus_to_virt
static inline __deprecated void *bus_to_virt(unsigned long x)
{
	return (void *)__bus_to_virt(x);
}
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
					&& pfn_valid(virt_to_pfn(kaddr)))

#endif

#include <asm-generic/memory_model.h>

#endif