/*
 *  arch/arm/include/asm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *  modification for nommu, Hyok S. Choi, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#include <asm/cache.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif

/*
 * Allow constants defined here to be used from assembly code by
 * appending the UL suffix only when compiling actual C code.
 */
#define UL(x) _AC(x, UL)

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
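
/*
 * Illustrative example (values assume the common CONFIG_PAGE_OFFSET of
 * 0xc0000000, i.e. the 3G/1G split; other splits give different numbers):
 *
 *	TASK_SIZE          = 0xc0000000 - 0x01000000       = 0xbf000000
 *	TASK_UNMAPPED_BASE = ALIGN(0xbf000000 / 3, SZ_16M)  = 0x40000000
 */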

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbol relocations (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical
 * address with its virtual address while keeping the offset from the
 * base section.
 */
#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
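
/*
 * Worked example (illustrative; the physical address is made up): an XIP
 * kernel stored at physical 0x00308000 is mapped with 1MB sections, so
 * only the offset within the section is preserved:
 *
 *	XIP_VIRT_ADDR(0x00308000) == MODULES_VADDR + 0x00008000
 */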

/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24

#define CONSISTENT_END		(0xffe00000UL)

#else /* CONFIG_MMU */

/*
 * Without an MMU the user task size is limited only by the end of the
 * free RAM region, so this define can no longer carry its original
 * meaning.  Fortunately, nothing references it in noMMU mode, for now.
 */
#ifndef TASK_SIZE
#define TASK_SIZE		(CONFIG_DRAM_SIZE)
#endif

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * Modules can be placed anywhere in RAM in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)  (physaddr)

#endif /* !CONFIG_MMU */

/*
 * The TCM memories (at most 32 KiB of ITCM and 32 KiB of DTCM) are
 * fixed at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
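
/*
 * Worked example (illustrative, assuming the usual 4 KiB pages, i.e.
 * PAGE_SHIFT == 12):
 *
 *	__phys_to_pfn(0x80100000) == 0x80100
 *	__pfn_to_phys(0x80100)    == 0x80100000
 */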

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Minimum guaranteed alignment in pgd_alloc().  The page table pointers
 * passed around in head.S and proc-*.S are shifted by this amount, in
 * order to leave spare high bits for systems with physical address
 * extension.  This does not fully accommodate the 40-bit addressing
 * capability of ARM LPAE, but gives us about 38 bits or so.
 */
#ifdef CONFIG_ARM_LPAE
#define ARCH_PGD_SHIFT		L1_CACHE_SHIFT
#else
#define ARCH_PGD_SHIFT		0
#endif
#define ARCH_PGD_MASK		((1 << ARCH_PGD_SHIFT) - 1)
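
/*
 * Illustrative example (assuming L1_CACHE_SHIFT == 6, i.e. 64-byte cache
 * lines, which is typical for LPAE-capable cores): a cache-line-aligned
 * pgd physical address is passed around shifted right by 6 bits, so a
 * 32-bit register can describe up to 32 + 6 = 38 bits of physical
 * address space:
 *
 *	reg      = (unsigned long)(pgd_phys >> ARCH_PGD_SHIFT);
 *	pgd_phys = (phys_addr_t)reg << ARCH_PGD_SHIFT;
 */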

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory.  This is used for XIP and NoMMU kernels, or by kernels which
 * have their own mach/memory.h.  Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#ifndef PLAT_PHYS_OFFSET
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
#endif

#ifndef __ASSEMBLY__

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(__virt_to_phys)
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)

#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pv_stub(from,to,instr,type)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (type))

#define __pv_stub_mov_hi(t)				\
	__asm__ volatile("@ __pv_stub_mov\n"		\
	"1:	mov	%R0, %1\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (t)					\
	: "I" (__PV_BITS_7_0))

#define __pv_add_carry_stub(x, y)			\
	__asm__ volatile("@ __pv_add_carry_stub\n"	\
	"1:	adds	%Q0, %1, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "+r" (y)					\
	: "r" (x), "I" (__PV_BITS_31_24)		\
	: "cc")
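
/*
 * Rough sketch of how the stubs above behave at run time (the concrete
 * values below are made up for illustration).  Each stub emits an
 * instruction with a placeholder 8-bit immediate and records that
 * instruction's address in the .pv_table section; early boot code walks
 * the table and rewrites the immediate to the real phys-virt offset.
 * With a 32-bit phys_addr_t, a __virt_to_phys() call site roughly goes
 * from:
 *
 *	add	r0, r0, #0x81000000	@ placeholder (__PV_BITS_31_24)
 *
 * to, e.g. with RAM at physical 0x80000000 and PAGE_OFFSET 0xc0000000:
 *
 *	add	r0, r0, #0xc0000000	@ patched phys-virt offset
 */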

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add", __PV_BITS_31_24);
	} else {
		__pv_stub_mov_hi(t);
		__pv_add_carry_stub(x, t);
	}
	return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64 bit, and makes sure that the inline
	 * assembler expression receives a 32-bit argument where a
	 * 32-bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
	return t;
}

#else

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)
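
/*
 * Worked example for the non-patched case (illustrative values only,
 * assuming PAGE_OFFSET == 0xc0000000, PHYS_OFFSET == 0x80000000 and
 * 4 KiB pages):
 *
 *	__virt_to_phys(0xc0100000) == 0x80100000
 *	__phys_to_virt(0x80100000) == 0xc0100000
 *	virt_to_pfn(0xc0100000)    == 0x80100
 */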

#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
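
/*
 * Minimal usage sketch (illustrative only, not part of this header;
 * assumes <linux/slab.h> and <linux/bug.h>).  These helpers are only
 * meaningful for kernel direct-mapped (lowmem) addresses:
 */
#if 0	/* example */
static void example_pa_va_roundtrip(void)
{
	void *kbuf = kmalloc(SZ_4K, GFP_KERNEL);	/* lowmem, linear map */
	phys_addr_t pa = __pa(kbuf);			/* virtual -> physical */

	WARN_ON(__va(pa) != kbuf);			/* physical -> virtual round trip */
	kfree(kbuf);
}
#endif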

extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

/*
 * These are for systems that have a hardware-supported interconnect
 * alias of physical memory for idmap purposes.  Most cases should leave
 * these untouched.
 */
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
	if (arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	else
		return __virt_to_phys(x);
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
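
/*
 * Sketch of how a platform might install the hook above (illustrative
 * only; the function name and EXAMPLE_IDMAP_ALIAS_OFFSET are
 * hypothetical, not a real platform):
 */
#if 0	/* example */
static phys_addr_t example_virt_to_idmap(unsigned long x)
{
	/* redirect idmap translations to an aliased view of RAM */
	return __virt_to_phys(x) + EXAMPLE_IDMAP_ALIAS_OFFSET;
}

static void example_init_early(void)
{
	arch_virt_to_idmap = example_virt_to_idmap;
}
#endif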

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that does not mean
 * you should use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif

#ifdef CONFIG_VIRT_TO_BUS
static inline __deprecated unsigned long virt_to_bus(void *x)
{
	return __virt_to_bus((unsigned long)x);
}

static inline __deprecated void *bus_to_virt(unsigned long x)
{
	return (void *)__bus_to_virt(x);
}
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && \
				  (unsigned long)(kaddr) < (unsigned long)high_memory) \
				 && pfn_valid(virt_to_pfn(kaddr)))
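
/*
 * Minimal usage sketch (illustrative only; example_lookup_page is a
 * hypothetical helper): validate a lowmem address before looking up
 * its struct page:
 */
#if 0	/* example */
static struct page *example_lookup_page(const void *ptr)
{
	if (!virt_addr_valid(ptr))
		return NULL;

	return virt_to_page(ptr);
}
#endif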

#endif	/* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif	/* __ASM_ARM_MEMORY_H */