//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }

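// Fuchsia has no POSIX-style pid; the koid (kernel object id) of the
// process object is the closest stable, unique identifier, so it serves
// as the "pid" here.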
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void CheckASLR() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

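// Usage sketch (illustrative, not part of this file): callers elsewhere in
// the sanitizer runtimes typically pair a linker-initialized BlockingMutex
// with the BlockingMutexLock RAII wrapper from sanitizer_mutex.h, e.g.:
//
//   static BlockingMutex mu(LINKER_INITIALIZED);
//   void UpdateSharedState() {   // hypothetical caller
//     BlockingMutexLock l(&mu);  // Lock() on entry, Unlock() on scope exit.
//     // ... touch state guarded by mu ...
//   }
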
uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  _zx_handle_close(vmo);
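  // Note: closing the VMO handle does not tear down the mapping just made;
  // on Zircon a mapping keeps the underlying VMO alive until it is unmapped.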

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size,
                            ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                                ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                            &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

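// Lifecycle sketch (illustrative only; the sizes and name are hypothetical):
// a client reserves a range once, then commits and releases pieces of it:
//
//   ReservedAddressRange range;
//   uptr base = range.Init(1 << 30, "example");  // reserve 1 GiB of VA
//   uptr p = range.MapOrDie(base, 1 << 20);      // commit the first 1 MiB
//   range.Unmap(p, 1 << 20);                     // release it again
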
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, uptr range_size, const char *name,
                             bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // The requested mapping must fit entirely within the reserved range.
  DCHECK_GE(range_size, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
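  // Worked example (hypothetical numbers): for size = 64 KiB and alignment =
  // 64 KiB, map_size is 128 KiB. If the kernel returns map_addr = 0x1003000,
  // the aligned region is [0x1010000, 0x1020000); the excess before it
  // ([0x1003000, 0x1010000)) and after it ([0x1020000, 0x1023000)) gets
  // unmapped below.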
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

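// The probe below delegates validation to the kernel: _zx_vmo_write must
// read the whole source range [beg, beg + size), and (as assumed here)
// Zircon reports an error instead of faulting when that range is not
// fully mapped and readable.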
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

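// There is no POSIX file I/O at this layer on Fuchsia: "reading a file"
// means obtaining a named configuration VMO and mapping it read-only, so
// *buff ends up pointing at the mapped VMO rather than heap memory.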
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

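// Accumulates output in a per-thread buffer and forwards only complete
// lines to __sanitizer_log_write, so concurrent threads' log lines do not
// interleave mid-line; a single line longer than the 128-byte buffer is
// flushed in buffer-sized chunks.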
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}


void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

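// Note: the `blocking` flag is ignored below; as assumed here,
// _zx_cprng_draw never blocks and always fills the buffer.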
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA