Reid Spencer | 2896c95 | 2004-09-11 04:57:25 +0000 | [diff] [blame] | 1 | //===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===// |
Michael J. Spencer | 447762d | 2010-11-29 18:16:10 +0000 | [diff] [blame] | 2 | // |
Reid Spencer | 2896c95 | 2004-09-11 04:57:25 +0000 | [diff] [blame] | 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
Chris Lattner | f3ebc3f | 2007-12-29 20:36:04 +0000 | [diff] [blame] | 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
Michael J. Spencer | 447762d | 2010-11-29 18:16:10 +0000 | [diff] [blame] | 7 | // |
Reid Spencer | 2896c95 | 2004-09-11 04:57:25 +0000 | [diff] [blame] | 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This file defines some functions for various memory management utilities. |
| 11 | // |
| 12 | //===----------------------------------------------------------------------===// |
| 13 | |
| 14 | #include "Unix.h" |
Michael J. Spencer | 447762d | 2010-11-29 18:16:10 +0000 | [diff] [blame] | 15 | #include "llvm/Support/DataTypes.h" |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 16 | #include "llvm/Support/ErrorHandling.h" |
Michael J. Spencer | 447762d | 2010-11-29 18:16:10 +0000 | [diff] [blame] | 17 | #include "llvm/Support/Process.h" |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 18 | |
| 19 | #ifdef HAVE_SYS_MMAN_H |
| 20 | #include <sys/mman.h> |
| 21 | #endif |
Reid Spencer | 2896c95 | 2004-09-11 04:57:25 +0000 | [diff] [blame] | 22 | |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 23 | #ifdef __APPLE__ |
| 24 | #include <mach/mach.h> |
| 25 | #endif |
| 26 | |
Chandler Carruth | a699b6a | 2012-09-11 01:17:24 +0000 | [diff] [blame] | 27 | #if defined(__mips__) |
| 28 | # if defined(__OpenBSD__) |
| 29 | # include <mips64/sysarch.h> |
| 30 | # else |
| 31 | # include <sys/cachectl.h> |
| 32 | # endif |
| 33 | #endif |
| 34 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 35 | extern "C" void sys_icache_invalidate(const void *Addr, size_t len); |
| 36 | |
| 37 | namespace { |
| 38 | |
| 39 | int getPosixProtectionFlags(unsigned Flags) { |
| 40 | switch (Flags) { |
| 41 | case llvm::sys::Memory::MF_READ: |
| 42 | return PROT_READ; |
| 43 | case llvm::sys::Memory::MF_WRITE: |
| 44 | return PROT_WRITE; |
| 45 | case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE: |
| 46 | return PROT_READ | PROT_WRITE; |
| 47 | case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC: |
| 48 | return PROT_READ | PROT_EXEC; |
| 49 | case llvm::sys::Memory::MF_READ | |
| 50 | llvm::sys::Memory::MF_WRITE | |
| 51 | llvm::sys::Memory::MF_EXEC: |
| 52 | return PROT_READ | PROT_WRITE | PROT_EXEC; |
| 53 | case llvm::sys::Memory::MF_EXEC: |
| 54 | return PROT_EXEC; |
| 55 | default: |
| 56 | llvm_unreachable("Illegal memory protection flag specified!"); |
| 57 | } |
| 58 | // Provide a default return value as required by some compilers. |
| 59 | return PROT_NONE; |
| 60 | } |
| 61 | |
| 62 | } // namespace |
| 63 | |
| 64 | namespace llvm { |
| 65 | namespace sys { |
| 66 | |
/// Allocate NumBytes of page-aligned memory via mmap() with the protection
/// given by PFlags (MF_READ/MF_WRITE/MF_EXEC combinations).
///
/// \param NumBytes  Requested size; rounded up to whole pages. Zero returns
///                  an empty MemoryBlock without touching EC.
/// \param NearBlock Optional placement hint: try to map just past this block.
///                  On mmap failure with a hint, the allocation is retried
///                  once with no hint before reporting an error.
/// \param PFlags    Protection flags, translated by getPosixProtectionFlags.
/// \param EC        Set to the errno-derived error_code on failure, success()
///                  otherwise.
/// \returns The mapped block (Size is the page-rounded length), or an empty
///          MemoryBlock on failure.
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  // Page size is queried once and cached for the lifetime of the process.
  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  // Some platforms cannot do anonymous mappings and require mapping
  // /dev/zero instead. The descriptor is opened once and reused.
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  // Anonymous, private (copy-on-write, not shared) mapping; the spelling of
  // the anonymous flag differs between platforms.
  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  // Without MAP_FIXED the hint is advisory; the kernel may place the mapping
  // elsewhere, which is fine for our purposes.
  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) //Try again without a near hint
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  // If the pages are executable, flush the instruction cache so stale
  // instructions are not executed from the new range.
  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}
| 124 | |
| 125 | error_code |
| 126 | Memory::releaseMappedMemory(MemoryBlock &M) { |
| 127 | if (M.Address == 0 || M.Size == 0) |
| 128 | return error_code::success(); |
| 129 | |
| 130 | if (0 != ::munmap(M.Address, M.Size)) |
| 131 | return error_code(errno, system_category()); |
| 132 | |
| 133 | M.Address = 0; |
| 134 | M.Size = 0; |
| 135 | |
| 136 | return error_code::success(); |
| 137 | } |
| 138 | |
| 139 | error_code |
| 140 | Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { |
| 141 | if (M.Address == 0 || M.Size == 0) |
| 142 | return error_code::success(); |
| 143 | |
| 144 | if (!Flags) |
| 145 | return error_code(EINVAL, generic_category()); |
| 146 | |
| 147 | int Protect = getPosixProtectionFlags(Flags); |
| 148 | |
| 149 | int Result = ::mprotect(M.Address, M.Size, Protect); |
| 150 | if (Result != 0) |
| 151 | return error_code(errno, system_category()); |
| 152 | |
| 153 | if (Flags & MF_EXEC) |
| 154 | Memory::InvalidateInstructionCache(M.Address, M.Size); |
| 155 | |
| 156 | return error_code::success(); |
| 157 | } |
| 158 | |
Evan Cheng | 7c5dbd9 | 2008-09-16 17:28:18 +0000 | [diff] [blame] | 159 | /// AllocateRWX - Allocate a slab of memory with read/write/execute |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 160 | /// permissions. This is typically used for JIT applications where we want |
| 161 | /// to emit code to the memory then jump to it. Getting this type of memory |
| 162 | /// is very OS specific. |
| 163 | /// |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 164 | MemoryBlock |
| 165 | Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, |
| 166 | std::string *ErrMsg) { |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 167 | if (NumBytes == 0) return MemoryBlock(); |
| 168 | |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 169 | size_t PageSize = process::get_self()->page_size(); |
| 170 | size_t NumPages = (NumBytes+PageSize-1)/PageSize; |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 171 | |
| 172 | int fd = -1; |
| 173 | #ifdef NEED_DEV_ZERO_FOR_MMAP |
| 174 | static int zero_fd = open("/dev/zero", O_RDWR); |
| 175 | if (zero_fd == -1) { |
Reid Spencer | e4ca722 | 2006-08-23 20:34:57 +0000 | [diff] [blame] | 176 | MakeErrMsg(ErrMsg, "Can't open /dev/zero device"); |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 177 | return MemoryBlock(); |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 178 | } |
| 179 | fd = zero_fd; |
| 180 | #endif |
| 181 | |
| 182 | int flags = MAP_PRIVATE | |
| 183 | #ifdef HAVE_MMAP_ANONYMOUS |
| 184 | MAP_ANONYMOUS |
| 185 | #else |
| 186 | MAP_ANON |
| 187 | #endif |
| 188 | ; |
Andrew Lenharth | 0940218 | 2005-07-29 23:40:16 +0000 | [diff] [blame] | 189 | |
Michael J. Spencer | 447762d | 2010-11-29 18:16:10 +0000 | [diff] [blame] | 190 | void* start = NearBlock ? (unsigned char*)NearBlock->base() + |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 191 | NearBlock->size() : 0; |
Andrew Lenharth | 0940218 | 2005-07-29 23:40:16 +0000 | [diff] [blame] | 192 | |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 193 | #if defined(__APPLE__) && defined(__arm__) |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 194 | void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC, |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 195 | flags, fd, 0); |
| 196 | #else |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 197 | void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC, |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 198 | flags, fd, 0); |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 199 | #endif |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 200 | if (pa == MAP_FAILED) { |
Andrew Lenharth | 0940218 | 2005-07-29 23:40:16 +0000 | [diff] [blame] | 201 | if (NearBlock) //Try again without a near hint |
| 202 | return AllocateRWX(NumBytes, 0); |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 203 | |
Reid Spencer | e4ca722 | 2006-08-23 20:34:57 +0000 | [diff] [blame] | 204 | MakeErrMsg(ErrMsg, "Can't allocate RWX Memory"); |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 205 | return MemoryBlock(); |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 206 | } |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 207 | |
| 208 | #if defined(__APPLE__) && defined(__arm__) |
| 209 | kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa, |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 210 | (vm_size_t)(PageSize*NumPages), 0, |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 211 | VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); |
| 212 | if (KERN_SUCCESS != kr) { |
Daniel Dunbar | 3222b9b | 2009-04-20 20:50:13 +0000 | [diff] [blame] | 213 | MakeErrMsg(ErrMsg, "vm_protect max RX failed"); |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 214 | return MemoryBlock(); |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 215 | } |
| 216 | |
| 217 | kr = vm_protect(mach_task_self(), (vm_address_t)pa, |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 218 | (vm_size_t)(PageSize*NumPages), 0, |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 219 | VM_PROT_READ | VM_PROT_WRITE); |
| 220 | if (KERN_SUCCESS != kr) { |
Daniel Dunbar | 3222b9b | 2009-04-20 20:50:13 +0000 | [diff] [blame] | 221 | MakeErrMsg(ErrMsg, "vm_protect RW failed"); |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 222 | return MemoryBlock(); |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 223 | } |
| 224 | #endif |
| 225 | |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 226 | MemoryBlock result; |
| 227 | result.Address = pa; |
Chandler Carruth | acd64be | 2012-12-31 23:31:56 +0000 | [diff] [blame] | 228 | result.Size = NumPages*PageSize; |
Evan Cheng | 5cc53c3 | 2008-09-18 07:54:21 +0000 | [diff] [blame] | 229 | |
Reid Spencer | 47cd653 | 2004-12-27 06:17:03 +0000 | [diff] [blame] | 230 | return result; |
| 231 | } |
| 232 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 233 | bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 234 | if (M.Address == 0 || M.Size == 0) return false; |
| 235 | if (0 != ::munmap(M.Address, M.Size)) |
Reid Spencer | e4ca722 | 2006-08-23 20:34:57 +0000 | [diff] [blame] | 236 | return MakeErrMsg(ErrMsg, "Can't release RWX Memory"); |
Chris Lattner | 5a9d2e5 | 2006-07-07 17:32:37 +0000 | [diff] [blame] | 237 | return false; |
Reid Spencer | 2896c95 | 2004-09-11 04:57:25 +0000 | [diff] [blame] | 238 | } |
| 239 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 240 | bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { |
Jim Grosbach | b22ef71 | 2008-10-03 16:17:20 +0000 | [diff] [blame] | 241 | #if defined(__APPLE__) && defined(__arm__) |
| 242 | if (M.Address == 0 || M.Size == 0) return false; |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 243 | Memory::InvalidateInstructionCache(M.Address, M.Size); |
Jim Grosbach | b22ef71 | 2008-10-03 16:17:20 +0000 | [diff] [blame] | 244 | kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, |
| 245 | (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE); |
| 246 | return KERN_SUCCESS == kr; |
| 247 | #else |
| 248 | return true; |
| 249 | #endif |
| 250 | } |
| 251 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 252 | bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { |
Jim Grosbach | b22ef71 | 2008-10-03 16:17:20 +0000 | [diff] [blame] | 253 | #if defined(__APPLE__) && defined(__arm__) |
| 254 | if (M.Address == 0 || M.Size == 0) return false; |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 255 | Memory::InvalidateInstructionCache(M.Address, M.Size); |
Jim Grosbach | b22ef71 | 2008-10-03 16:17:20 +0000 | [diff] [blame] | 256 | kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, |
| 257 | (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); |
| 258 | return KERN_SUCCESS == kr; |
| 259 | #else |
Jim Grosbach | 806d507 | 2011-03-18 18:51:03 +0000 | [diff] [blame] | 260 | return true; |
Jim Grosbach | b22ef71 | 2008-10-03 16:17:20 +0000 | [diff] [blame] | 261 | #endif |
| 262 | } |
| 263 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 264 | bool Memory::setRangeWritable(const void *Addr, size_t Size) { |
Jim Grosbach | 9396051 | 2008-10-20 21:39:23 +0000 | [diff] [blame] | 265 | #if defined(__APPLE__) && defined(__arm__) |
| 266 | kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, |
| 267 | (vm_size_t)Size, 0, |
| 268 | VM_PROT_READ | VM_PROT_WRITE); |
| 269 | return KERN_SUCCESS == kr; |
| 270 | #else |
| 271 | return true; |
| 272 | #endif |
| 273 | } |
| 274 | |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 275 | bool Memory::setRangeExecutable(const void *Addr, size_t Size) { |
Jim Grosbach | 9396051 | 2008-10-20 21:39:23 +0000 | [diff] [blame] | 276 | #if defined(__APPLE__) && defined(__arm__) |
| 277 | kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, |
| 278 | (vm_size_t)Size, 0, |
| 279 | VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); |
| 280 | return KERN_SUCCESS == kr; |
| 281 | #else |
| 282 | return true; |
| 283 | #endif |
| 284 | } |
Andrew Kaylor | 1f66100 | 2012-09-19 20:46:12 +0000 | [diff] [blame] | 285 | |
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
///
/// \param Addr Start of the range containing freshly written code.
/// \param Len  Length of the range in bytes.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

  // On Darwin, libSystem provides sys_icache_invalidate for both PPC and ARM
  // (declared extern "C" at the top of this file).
# if (defined(__POWERPC__) || defined (__ppc__) || \
     defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
# endif

#else

  // Non-Darwin PowerPC: flush manually, one cache line at a time.
  // NOTE(review): LineSize is hard-coded to 32 bytes — assumed to match the
  // cache line size of the targeted PPC implementations; confirm for new CPUs.
# if (defined(__POWERPC__) || defined (__ppc__) || \
     defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // First flush the data cache lines to memory (dcbf), then invalidate the
  // corresponding instruction cache lines (icbi); sync/isync order the two
  // passes.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
# elif defined(__arm__) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
# elif defined(__mips__)
  // MIPS: flush both caches via the cacheflush syscall wrapper.
  const char *Start = static_cast<const char *>(Addr);
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
# endif

#endif  // end apple

  // Tell Valgrind to discard any cached translations for the range.
  ValgrindDiscardTranslations(Addr, Len);
}
| 331 | |
| 332 | } // namespace sys |
| 333 | } // namespace llvm |