//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Process.h"

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void *);
#endif

namespace {

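/// Map an llvm::sys::Memory protection flag combination (MF_READ, MF_WRITE,
/// MF_EXEC) onto the equivalent POSIX PROT_* bits expected by mmap() and
/// mprotect().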
int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE |
      llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function
    // InvalidateInstructionCache uses instructions dcbf and icbi, both of
    // which are treated by the processor as loads. If the page has no read
    // permissions, executing these instructions will result in a
    // segmentation fault. Somehow, this problem is not present on Linux,
    // but it does happen on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace

namespace llvm {
namespace sys {

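// A minimal usage sketch for the mapped-memory API implemented below
// (hypothetical caller code, not part of this file):
//
//   std::error_code EC;
//   sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
//       4096, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
//   if (EC)
//     report_fatal_error("allocation failed: " + EC.message());
//   // ... fill MB.base() with code or data ...
//   EC = sys::Memory::protectMappedMemory(
//       MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
//   EC = sys::Memory::releaseMappedMemory(MB);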
MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             std::error_code &EC) {
  EC = std::error_code();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = Process::getPageSize();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting address
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                      NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);

    EC = std::error_code(errno, std::generic_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (0 != ::munmap(M.Address, M.Size))
    return std::error_code(errno, std::generic_category());

  M.Address = nullptr;
  M.Size = 0;

  return std::error_code();
}

std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == nullptr || M.Size == 0)
    return std::error_code();

  if (!Flags)
    return std::error_code(EINVAL, std::generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return std::error_code(errno, std::generic_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return std::error_code();
}

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code to the memory then jump to it. Getting this type of memory
/// is very OS specific.
///
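/// A minimal usage sketch (hypothetical caller code, not part of this file):
/// \code
///   std::string Err;
///   sys::MemoryBlock MB = sys::Memory::AllocateRWX(CodeSize, nullptr, &Err);
///   if (MB.base() == nullptr)
///     report_fatal_error("AllocateRWX failed: " + Err);
///   // ... emit machine code into MB.base() ...
///   sys::Memory::InvalidateInstructionCache(MB.base(), MB.size());
///   // ... call into the emitted code, then eventually:
///   sys::Memory::ReleaseRWX(MB, &Err);
/// \endcode
///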
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = Process::getPageSize();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : nullptr;

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint
      return AllocateRWX(NumBytes, nullptr);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == nullptr || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}

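// The setWritable/setExecutable/setRange* helpers below are meaningful mainly
// on Darwin/ARM targets, where they flip page protections with vm_protect();
// on other Unix configurations they report success without altering
// protections.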
bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
    (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

  // icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
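  // Flush each data cache block that overlaps [Addr, Addr+Len) with dcbf and
  // then invalidate the matching instruction cache blocks with icbi; a
  // 32-byte cache line is assumed here.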
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__) || defined(__mips__)) && \
        defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  endif

#endif  // end apple

  ValgrindDiscardTranslations(Addr, Len);
}

} // namespace sys
} // namespace llvm