//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <threads.h>  // thrd_current(), used by GetThreadSelf() below.
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

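// Zircon has no dedicated yield syscall; per the zx_nanosleep documentation,
// a deadline of 0 is already in the past, so the call does not sleep but
// still deschedules the calling thread once, which serves as a yield.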
uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }

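// Fuchsia has no global numeric process IDs; the kernel object id (koid) of
// the process handle is the closest stable equivalent, so it stands in for
// the PID here.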
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void CheckASLR() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
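
// The lock/unlock protocol below is the classic three-state futex mutex
// (compare Ulrich Drepper's "Futexes Are Tricky"):
//   MtxUnlocked -> MtxLocked    uncontended Lock() via one atomic_exchange;
//   MtxLocked   -> MtxSleeping  a contended locker advertises that it waits;
//   Unlock() issues a futex wake only when the state was MtxSleeping, so the
//   common uncontended unlock costs no syscall at all.
// Hypothetical usage sketch (BlockingMutexLock is the scoped-lock helper
// from sanitizer_mutex.h):
//   static BlockingMutex mu(LINKER_INITIALIZED);
//   { BlockingMutexLock l(&mu); /* ... critical section ... */ }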

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

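// The program loader reserves the shadow region at startup and reports it
// via __sanitizer_shadow_bounds(); per <zircon/sanitizer.h>, memory_limit is
// one past the highest usable userspace address, hence the -1 below.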
uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

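// Zircon has no mmap() syscall: anonymous memory is obtained by creating a
// VMO (virtual memory object) and mapping it into a VMAR (virtual memory
// address region).  The helpers below map into the root VMAR unless an
// explicit VMAR handle is supplied.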
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size,
                            ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                                ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                            &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
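
// Hypothetical usage sketch for the ReservedAddressRange API as implemented
// here: reserve a child VMAR once, then carve fixed mappings out of it:
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 30, "shadow");  // reserve 1 GiB
//   range.MapOrDie(base, 1 << 20);  // commit the first 1 MiB of it
//   range.Unmap(base, 1 << 20);     // give it back; the hole stays reserved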

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, uptr range_size, const char *name,
                             bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // The requested window must lie entirely within the reserved range.
  DCHECK_GE(range_size, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
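  // Only three shapes of unmap are supported: the whole range, a prefix
  // starting at base_, or a suffix ending exactly at base_ + size_.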
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
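  // Worked example (made-up numbers): size = 64 KiB, alignment = 64 KiB,
  // 4 KiB pages, so map_size = 128 KiB.  If the kernel returns
  // map_addr = 0x23547000, then addr = RoundUpTo(map_addr, 64 KiB)
  // = 0x23550000 and end = 0x23560000.  The 36 KiB before addr and the
  // 28 KiB after end get unmapped, leaving exactly one aligned 64 KiB region.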
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
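  // Writing the range into a throwaway VMO makes the kernel read every byte
  // of [beg, beg + size), so a ZX_OK result means the whole range was
  // readable; the cost is one copy of the data.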
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

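// No file I/O happens here: on Fuchsia, named configuration "files" arrive
// as VMOs via __sanitizer_get_configuration() (declared in
// <zircon/sanitizer.h>), and the contents are mapped read-only rather than
// copied into an allocated buffer.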
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

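  // Accumulate output in a per-thread buffer and flush only complete lines
  // (or a full buffer), since each __sanitizer_log_write() call produces a
  // separate log record and flushing mid-line would garble messages.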
  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
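  // Zircon's CPRNG neither blocks nor fails for requests within the draw
  // limit, so the `blocking` argument is irrelevant here.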
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
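// Fuchsia's libc calls this hook early in process startup, before main(),
// handing over the information that sanitizers on other platforms would read
// from /proc, which Fuchsia does not have.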
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA