/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/page-def.h>
#include <asm/sizes.h>

/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M
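
/*
 * Worked example (assuming IO_SPACE_LIMIT is derived elsewhere as
 * PCI_IO_SIZE - 1): with PCI_IO_SIZE = SZ_16M = 0x01000000 the limit is
 * 0x00ffffff, which has only the low 24 bits set, so ANDing a port number
 * with it keeps the offset within the I/O window - this only works while
 * the size stays a power of two.
 */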

/*
 * Log2 of the upper bound of the size of a struct page. Used for sizing
 * the vmemmap region only, does not affect actual memory footprint.
 * We don't use sizeof(struct page) directly since taking its size here
 * requires its definition to be available at this point in the inclusion
 * chain, and it may not be a power of 2 in the first place.
 */
#define STRUCT_PAGE_MAX_SHIFT	6

/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 * a struct page array
 */
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
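
/*
 * Worked example (assuming the common 48-bit VA, 4K page configuration,
 * i.e. VA_BITS = 48, PAGE_SHIFT = 12): the linear map spans 2^47 bytes,
 * or 2^35 pages, and with at most 2^6 bytes per struct page the array
 * needs 2^(47 - 12 + 6) = 2^41 bytes, so VMEMMAP_SIZE = 2 TiB.
 */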

/*
 * PAGE_OFFSET - the virtual address of the start of the linear map (top
 *		 (VA_BITS - 1))
 * KIMAGE_VADDR - the virtual address of the start of the kernel image
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) - \
				 (UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
				 (UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
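
/*
 * Worked example (assuming VA_BITS = 48 and KASAN_SHADOW_SIZE = 0): the
 * kernel VA space is the top 2^48 bytes, giving
 *
 *   VA_START      = 0xffff000000000000
 *   MODULES_VADDR = VA_START, MODULES_END = VA_START + 128M
 *   KIMAGE_VADDR  = MODULES_END
 *   PAGE_OFFSET   = 0xffff800000000000 (upper half, the linear map)
 *
 * with the vmemmap, PCI I/O and fixmap regions packed immediately below
 * PAGE_OFFSET.
 */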

#define KERNEL_START		_text
#define KERNEL_END		_end

/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
#endif
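
/*
 * Worked example (assuming VA_BITS = 48 and CONFIG_KASAN=y): each shadow
 * byte covers 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes of address space, so
 * the shadow region is 2^(48 - 3) = 32 TiB, i.e. 1/8th of the 256 TiB
 * kernel VA range; kernel stacks are doubled via KASAN_THREAD_SHIFT.
 */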

#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)

/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif

#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
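
/*
 * Worked examples: without KASAN, MIN_THREAD_SHIFT = 14 gives 16 KiB
 * stacks; with KASAN it becomes 15, i.e. 32 KiB. With CONFIG_VMAP_STACK
 * and 64 KiB pages (PAGE_SHIFT = 16 > MIN_THREAD_SHIFT), THREAD_SHIFT is
 * bumped to PAGE_SHIFT so each stack occupies a whole 64 KiB page.
 */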

/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
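
/*
 * Worked example (assuming THREAD_SHIFT = 14, i.e. 16 KiB stacks aligned
 * to 32 KiB): every in-bounds sp value then has the same state of bit 14,
 * while the first push beyond the stack base flips it, so the entry code
 * can flag overflow with a single test of sp & (1 << THREAD_SHIFT)
 * instead of a full range comparison.
 */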

#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K

/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif

/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1

#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
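
/*
 * Illustrative values (assuming the usual translation levels): with 4K
 * pages PUD_SHIFT is 30, so ioremap() may use block mappings of up to
 * 1 GiB; with 16K or 64K pages PMD_SHIFT (25 or 29) caps block mappings
 * at 32 MiB or 512 MiB respectively.
 */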

#ifdef CONFIG_BLK_DEV_INITRD
#define __early_init_dt_declare_initrd(__start, __end)			\
	do {								\
		initrd_start = (__start);				\
		initrd_end = (__end);					\
	} while (0)
#endif

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/mmdebug.h>

extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
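
/*
 * Sanity-check note (an assumption about the early init code, not
 * enforced here): memstart_addr is expected to start out as an odd
 * sentinel value (e.g. -1) until early memory init computes the real
 * base, so the VM_BUG_ON above catches uses of PHYS_OFFSET before it is
 * valid.
 */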

/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;

static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}
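
/*
 * Usage note (an assumption about the boot code): kimage_vaddr is
 * expected to hold the runtime virtual base of the kernel image, so with
 * KASLR disabled it equals KIMAGE_VADDR and kaslr_offset() returns 0;
 * with KASLR enabled it returns the randomized displacement of the image.
 */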

/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX

/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET		(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))

#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)

#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
			       __kimg_to_phys(__x);			\
})
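
/*
 * Worked example (assuming VA_BITS = 48 and a hypothetical
 * PHYS_OFFSET = 0x80000000): a linear-map address such as
 * PAGE_OFFSET + 0x1000 has bit 47 set, so it translates to
 * PHYS_OFFSET + 0x1000 = 0x80001000; a kernel-image symbol address has
 * that bit clear and is translated by subtracting kimage_voffset instead.
 */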

#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))

/*
 * virt_to_page(k)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
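
/*
 * Sketch of the math above (assuming VA_BITS = 48 and 4K pages): for a
 * linear address kaddr, (kaddr & ~PAGE_OFFSET) is its byte offset into
 * the linear map; dividing by PAGE_SIZE and scaling by
 * sizeof(struct page) gives the byte offset of its struct page, which is
 * OR-ed onto VMEMMAP_START. __page_to_voff() performs the inverse, so
 * page_to_virt(virt_to_page(kaddr)) returns kaddr rounded down to a page
 * boundary.
 */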

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
#endif	/* !__ASSEMBLY__ */

#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))

#include <asm-generic/memory_model.h>

#endif	/* __ASM_MEMORY_H */