//===- Unix/Memory.cpp - Generic UNIX System Configuration ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some functions for various memory management utilities.
//
//===----------------------------------------------------------------------===//

#include "Unix.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"   // MemoryBlock and the sys::Memory API.
#include "llvm/Support/Process.h"
#include "llvm/Support/Valgrind.h" // ValgrindDiscardTranslations.

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#ifdef __APPLE__
#include <mach/mach.h>
#endif

#if defined(__mips__)
#  if defined(__OpenBSD__)
#    include <mips64/sysarch.h>
#  else
#    include <sys/cachectl.h>
#  endif
#endif

#ifdef __APPLE__
extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
#else
extern "C" void __clear_cache(void *, void*);
#endif

namespace {

int getPosixProtectionFlags(unsigned Flags) {
  switch (Flags) {
  case llvm::sys::Memory::MF_READ:
    return PROT_READ;
  case llvm::sys::Memory::MF_WRITE:
    return PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
    return PROT_READ | PROT_WRITE;
  case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_EXEC;
  case llvm::sys::Memory::MF_READ |
       llvm::sys::Memory::MF_WRITE |
       llvm::sys::Memory::MF_EXEC:
    return PROT_READ | PROT_WRITE | PROT_EXEC;
  case llvm::sys::Memory::MF_EXEC:
#if defined(__FreeBSD__)
    // On PowerPC, having an executable page that has no read permission
    // can have unintended consequences. The function
    // InvalidateInstructionCache uses instructions dcbf and icbi, both of
    // which are treated by the processor as loads. If the page has no read
    // permissions, executing these instructions will result in a
    // segmentation fault. Somehow, this problem is not present on Linux,
    // but it does happen on FreeBSD.
    return PROT_READ | PROT_EXEC;
#else
    return PROT_EXEC;
#endif
  default:
    llvm_unreachable("Illegal memory protection flag specified!");
  }
  // Provide a default return value as required by some compilers.
  return PROT_NONE;
}

} // namespace
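
// A minimal illustration of the mapping above (a sketch, not part of the
// original file; getPosixProtectionFlags is internal to this translation
// unit, so the call is only meaningful inside it):
//
//   int Prot = getPosixProtectionFlags(llvm::sys::Memory::MF_READ |
//                                      llvm::sys::Memory::MF_WRITE);
//   assert(Prot == (PROT_READ | PROT_WRITE));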

namespace llvm {
namespace sys {

MemoryBlock
Memory::allocateMappedMemory(size_t NumBytes,
                             const MemoryBlock *const NearBlock,
                             unsigned PFlags,
                             error_code &EC) {
  EC = error_code::success();
  if (NumBytes == 0)
    return MemoryBlock();

  static const size_t PageSize = process::get_self()->page_size();
  const size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    EC = error_code(errno, system_category());
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int MMFlags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ; // Ends statement above.

  int Protect = getPosixProtectionFlags(PFlags);

  // Use any near hint and the page size to set a page-aligned starting
  // address.
  uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
                                NearBlock->size() : 0;
  if (Start && Start % PageSize)
    Start += PageSize - Start % PageSize;

  void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
                      Protect, MMFlags, fd, 0);
  if (Addr == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return allocateMappedMemory(NumBytes, 0, PFlags, EC);

    EC = error_code(errno, system_category());
    return MemoryBlock();
  }

  MemoryBlock Result;
  Result.Address = Addr;
  Result.Size = NumPages*PageSize;

  if (PFlags & MF_EXEC)
    Memory::InvalidateInstructionCache(Result.Address, Result.Size);

  return Result;
}

error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (0 != ::munmap(M.Address, M.Size))
    return error_code(errno, system_category());

  M.Address = 0;
  M.Size = 0;

  return error_code::success();
}

error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
  if (M.Address == 0 || M.Size == 0)
    return error_code::success();

  if (!Flags)
    return error_code(EINVAL, generic_category());

  int Protect = getPosixProtectionFlags(Flags);

  int Result = ::mprotect(M.Address, M.Size, Protect);
  if (Result != 0)
    return error_code(errno, system_category());

  if (Flags & MF_EXEC)
    Memory::InvalidateInstructionCache(M.Address, M.Size);

  return error_code::success();
}
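
// Illustrative sketch of the intended W^X workflow for the three functions
// above (not part of the original file; CodeSize and Emitted are
// hypothetical): allocate writable memory, emit code into it, then flip the
// protection to read/execute. protectMappedMemory invalidates the
// instruction cache whenever MF_EXEC is requested, so no separate
// invalidation call is needed here.
//
//   error_code EC;
//   MemoryBlock MB = Memory::allocateMappedMemory(
//       CodeSize, 0, Memory::MF_READ | Memory::MF_WRITE, EC);
//   if (EC) return EC;
//   memcpy(MB.base(), Emitted, CodeSize);
//   EC = Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
//   // ... call into MB.base() ...
//   EC = Memory::releaseMappedMemory(MB);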

/// AllocateRWX - Allocate a slab of memory with read/write/execute
/// permissions. This is typically used for JIT applications where we want
/// to emit code into the memory and then jump to it. Getting this type of
/// memory is very OS specific.
///
MemoryBlock
Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
                    std::string *ErrMsg) {
  if (NumBytes == 0) return MemoryBlock();

  size_t PageSize = process::get_self()->page_size();
  size_t NumPages = (NumBytes+PageSize-1)/PageSize;

  int fd = -1;
#ifdef NEED_DEV_ZERO_FOR_MMAP
  static int zero_fd = open("/dev/zero", O_RDWR);
  if (zero_fd == -1) {
    MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
    return MemoryBlock();
  }
  fd = zero_fd;
#endif

  int flags = MAP_PRIVATE |
#ifdef HAVE_MMAP_ANONYMOUS
  MAP_ANONYMOUS
#else
  MAP_ANON
#endif
  ;

  void* start = NearBlock ? (unsigned char*)NearBlock->base() +
                            NearBlock->size() : 0;

#if defined(__APPLE__) && defined(__arm__)
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
                    flags, fd, 0);
#else
  void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
                    flags, fd, 0);
#endif
  if (pa == MAP_FAILED) {
    if (NearBlock) // Try again without a near hint.
      return AllocateRWX(NumBytes, 0, ErrMsg);

    MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
    return MemoryBlock();
  }

#if defined(__APPLE__) && defined(__arm__)
  // On this target a page cannot be writable and executable at the same
  // time, so the mapping above was created read/execute. Adjust the
  // protections with vm_protect so the region can be written to;
  // setExecutable() restores execute permission later.
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                                (vm_size_t)(PageSize*NumPages), 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect max RX failed");
    return MemoryBlock();
  }

  kr = vm_protect(mach_task_self(), (vm_address_t)pa,
                  (vm_size_t)(PageSize*NumPages), 0,
                  VM_PROT_READ | VM_PROT_WRITE);
  if (KERN_SUCCESS != kr) {
    MakeErrMsg(ErrMsg, "vm_protect RW failed");
    return MemoryBlock();
  }
#endif

  MemoryBlock result;
  result.Address = pa;
  result.Size = NumPages*PageSize;

  return result;
}

bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
  if (M.Address == 0 || M.Size == 0) return false;
  if (0 != ::munmap(M.Address, M.Size))
    return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
  return false;
}
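
// Illustrative sketch of the legacy path above (not part of the original
// file; CodeSize and Emitted are hypothetical): AllocateRWX hands back
// memory that is already writable and executable, so a JIT can emit and run
// code without a separate protection step. Note that ReleaseRWX returns
// true on failure.
//
//   std::string Err;
//   MemoryBlock MB = Memory::AllocateRWX(CodeSize, 0, &Err);
//   if (MB.base() == 0) report_fatal_error(Err);
//   memcpy(MB.base(), Emitted, CodeSize);
//   Memory::InvalidateInstructionCache(MB.base(), MB.size());
//   // ... call into MB.base() ...
//   if (Memory::ReleaseRWX(MB, &Err))
//     report_fatal_error(Err);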

bool Memory::setWritable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setExecutable(MemoryBlock &M, std::string *ErrMsg) {
#if defined(__APPLE__) && defined(__arm__)
  if (M.Address == 0 || M.Size == 0) return false;
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
                                (vm_size_t)M.Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#elif defined(__arm__) || defined(__aarch64__)
  Memory::InvalidateInstructionCache(M.Address, M.Size);
  return true;
#else
  return true;
#endif
}

bool Memory::setRangeWritable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_WRITE);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
#if defined(__APPLE__) && defined(__arm__)
  kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
                                (vm_size_t)Size, 0,
                                VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
  return KERN_SUCCESS == kr;
#else
  return true;
#endif
}

/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted, it must invalidate the instruction cache on some
/// platforms.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

  // icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__)
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined(__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  // Flush the data cache lines covering the range, then invalidate the
  // corresponding instruction cache lines. 32 bytes is a safe lower bound
  // for the cache line size on the PowerPC implementations we target;
  // stepping by less than the real line size only causes redundant flushes.
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#    if defined(ANDROID)
  // The declaration of "cacheflush" in Android bionic:
  //   extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#    else
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#    endif
#  endif

#endif // end apple

  ValgrindDiscardTranslations(Addr, Len);
}
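
// Illustrative call sequence (not part of the original file; Dest, CodeBytes,
// and Size are hypothetical): on targets with split instruction/data caches,
// freshly written code must be invalidated before it is executed.
//
//   memcpy(Dest, CodeBytes, Size);
//   Memory::InvalidateInstructionCache(Dest, Size);
//   void (*Fn)() = reinterpret_cast<void (*)()>(Dest);
//   Fn(); // Safe only after the invalidation above.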

} // namespace sys
} // namespace llvm