//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===---------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stacktrace.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <unwind.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }

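// Use the kernel object ID (koid) of the process handle as the process
// identifier.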
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

uptr GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

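// The current thread's stack extent is queried through the pthread
// attributes API.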
void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

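// Stack unwinding goes through the _Unwind_Backtrace interface; each frame's
// PC is collected into the trace buffer until max_depth is reached.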
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};

_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = _Unwind_GetIP(ctx);
  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
  arg->stack->trace_buffer[arg->stack->size++] = pc;
  return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
                                             : _URC_NO_REASON);
}

void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  CHECK_GT(size, 0);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}

void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CHECK_NE(context, nullptr);
  UNREACHABLE("signal context doesn't exist");
}

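// BlockingMutex is built directly on the Zircon futex: the lock word holds
// one of the three states below, and Unlock issues a futex wake only when
// there may be sleeping waiters.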
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

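// The usable address-space limit comes from the shadow bounds reported by the
// Fuchsia runtime; the result is cached in ShadowBounds for later use.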
uptr GetMaxVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

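// Anonymous memory is obtained by creating a VMO (virtual memory object),
// naming it after mem_type, and mapping it into the root VMAR. The VMO
// handle can be closed once the mapping exists.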
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  base_ = MmapNoAccess(init_size);
  size_ = init_size;
  name_ = name;
  return reinterpret_cast<uptr>(base_);
}

// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               bool tolerate_enomem) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, map_size));
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  void *addr_as_void = reinterpret_cast<void *>(addr);
  uptr base_as_uptr = reinterpret_cast<uptr>(base_);
  // Only unmap at the beginning or end of the range.
  CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
  CHECK_LE(size, size_);
  UnmapOrDie(reinterpret_cast<void *>(addr), size);
}

// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
// uses that VMAR instead of the root.

zx_handle_t allocator_vmar = ZX_HANDLE_INVALID;
uintptr_t allocator_vmar_base;
size_t allocator_vmar_size;

void *MmapNoAccess(uptr size) {
  size = RoundUpTo(size, PAGE_SIZE);
  CHECK_EQ(allocator_vmar, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_status_t status =
      _zx_vmar_allocate(_zx_vmar_root_self(), 0, size,
                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                        &allocator_vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(size, "sanitizer allocator address space",
                            "zx_vmar_allocate", status);

  allocator_vmar_base = base;
  allocator_vmar_size = size;
  return reinterpret_cast<void *>(base);
}

constexpr const char kAllocatorVmoName[] = "sanitizer_allocator";

static void *DoMmapFixedOrDie(uptr fixed_addr, uptr size, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmo_create", status);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, kAllocatorVmoName,
                          sizeof(kAllocatorVmoName) - 1);

  DCHECK_GE(fixed_addr, allocator_vmar_base);
  uintptr_t offset = fixed_addr - allocator_vmar_base;
  DCHECK_LE(size, allocator_vmar_size);
  DCHECK_GE(allocator_vmar_size - offset, size);

  uintptr_t addr;
  status = _zx_vmar_map(
      allocator_vmar, offset, vmo, 0, size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmar_map", status);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, true);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, false);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status =
            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                         &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status = _zx_vmar_unmap(_zx_vmar_root_self(),
                                      reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  UNIMPLEMENTED();  // TODO(mcgrathr): write it
}

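// Accessibility is probed by copying the range into a scratch VMO;
// _zx_vmo_write fails if the source memory cannot be read.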
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    while (size > 0) {
      size_t wrote;
      status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
                             &wrote);
      if (status != ZX_OK) break;
      CHECK_GT(wrote, 0);
      CHECK_LE(wrote, size);
      beg += wrote;
      size -= wrote;
    }
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

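// Rather than opening a file, the named configuration data is obtained as a
// VMO via __sanitizer_get_configuration and mapped read-only into the buffer.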
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                            ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  __sanitizer_log_write(buffer, internal_strlen(buffer));
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = StoredArgv[0];
  if (!argv0) argv0 = "<UNKNOWN>";
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

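// Random bytes come from the kernel CPRNG; a single _zx_cprng_draw call is
// limited to ZX_CPRNG_DRAW_MAX_LEN bytes, which the CHECK below enforces.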
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  size_t size;
  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
  CHECK_EQ(size, length);
  return true;
}

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
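// Invoked at process startup; records the command line, environment, and the
// main thread's stack bounds for the functions above.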
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA