//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===---------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <unwind.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

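// Note: _zx_nanosleep sleeps until an absolute deadline, so a relative
// duration must first be converted with _zx_deadline_after (a deadline of
// zero, as in internal_sched_yield above, is already in the past and
// effectively just yields).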
static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }

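// Zircon has no numeric process IDs in the POSIX sense; the kernel object
// ID (koid) of the process is the closest stable equivalent.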
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

uptr GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }

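// Stack unwinding goes through the C++ ABI unwinder: _Unwind_Backtrace
// invokes Unwind_Trace once per frame, and the callback returns
// _URC_NO_REASON to keep walking or _URC_NORMAL_STOP to end the walk early
// (here: on a bogus PC in the zero page, or once max_depth frames have
// been collected).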
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};

_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = _Unwind_GetIP(ctx);
  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
  arg->stack->trace_buffer[arg->stack->size++] = pc;
  return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
                                             : _URC_NO_REASON);
}

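// One extra frame (max_depth + 1) is requested because the innermost frame
// collected by the unwinder belongs to this function itself; it is popped
// below so that the caller-supplied pc ends up on top of the trace.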
void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  CHECK_GT(size, 0);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}

void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CHECK_NE(context, nullptr);
  UNREACHABLE("signal context doesn't exist");
}

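// BlockingMutex is a classic three-state futex mutex (cf. Ulrich Drepper's
// "Futexes Are Tricky"): MtxSleeping is distinguished from MtxLocked so
// that Unlock only pays for a futex-wake syscall when a waiter may
// actually be asleep on the futex word.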
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

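// The shadow memory layout on Fuchsia is established before this code
// runs; __sanitizer_shadow_bounds reports the bounds that were chosen, and
// memory_limit is the end of the usable userspace address range.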
sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

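// Zircon has no anonymous mmap: memory comes from a VMO (virtual memory
// object) that is then mapped into a VMAR (virtual memory address region).
// The VMO handle can be closed right after mapping; the mapping itself
// keeps the underlying memory alive. Naming the VMO with ZX_PROP_NAME is
// purely for diagnostics.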
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
// uses that VMAR instead of the root.
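// A VMAR hands back a base address chosen by the kernel; later mappings
// into it are addressed by byte offset from that base, which is why
// DoMmapFixedOrDie below maps at offset fixed_addr - allocator_vmar_base.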

zx_handle_t allocator_vmar = ZX_HANDLE_INVALID;
uintptr_t allocator_vmar_base;
size_t allocator_vmar_size;

void *MmapNoAccess(uptr size) {
  size = RoundUpTo(size, PAGE_SIZE);
  CHECK_EQ(allocator_vmar, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_status_t status =
      _zx_vmar_allocate(_zx_vmar_root_self(), 0, size,
                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                        &allocator_vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(size, "sanitizer allocator address space",
                            "zx_vmar_allocate", status);

  allocator_vmar_base = base;
  allocator_vmar_size = size;
  return reinterpret_cast<void *>(base);
}

constexpr const char kAllocatorVmoName[] = "sanitizer_allocator";

static void *DoMmapFixedOrDie(uptr fixed_addr, uptr size, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmo_create", status);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, kAllocatorVmoName,
                          sizeof(kAllocatorVmoName) - 1);

  DCHECK_GE(fixed_addr, allocator_vmar_base);
  uintptr_t offset = fixed_addr - allocator_vmar_base;
  DCHECK_LE(size, allocator_vmar_size);
  DCHECK_GE(allocator_vmar_size - offset, size);

  uintptr_t addr;
  status = _zx_vmar_map(
      allocator_vmar, offset, vmo, 0, size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmar_map", status);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, true);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, false);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
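  // For example, with size = alignment = 0x10000 and a mapping that lands
  // at 0x12348000: map 0x20000 bytes, round the start up to the aligned
  // address 0x12350000, remap [0x12350000, 0x12360000) from VMO offset 0,
  // then unmap the 0x8000 bytes before and the 0x8000 bytes after.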
  size_t map_size = size + alignment;
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status =
            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                         &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status = _zx_vmar_unmap(_zx_vmar_root_self(),
                                      reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  UNIMPLEMENTED();  // TODO(mcgrathr): write it
}

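// Probe readability by using [beg, beg+size) as the *source* buffer of a
// VMO write: if any part of the range is unmapped or unreadable, the
// syscall fails with an error instead of faulting this process.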
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    while (size > 0) {
      size_t wrote;
      status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
                             &wrote);
      if (status != ZX_OK) break;
      CHECK_GT(wrote, 0);
      CHECK_LE(wrote, size);
      beg += wrote;
      size -= wrote;
    }
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

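// There is no filesystem access here: "files" such as sanitizer option
// data are delivered to the process as VMOs, looked up by name via
// __sanitizer_get_configuration. The returned buffer is a read-only
// mapping of that VMO, so it is released by unmapping rather than freeing.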
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                            ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  __sanitizer_log_write(buffer, internal_strlen(buffer));
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = StoredArgv[0];
  if (!argv0) argv0 = "<UNKNOWN>";
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

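// Randomness comes straight from the kernel CPRNG. A single _zx_cprng_draw
// call is capped at ZX_CPRNG_DRAW_MAX_LEN bytes, hence the CHECK on
// length; the `blocking` flag is ignored because the draw is always
// satisfied immediately.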
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  size_t size;
  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
  CHECK_EQ(size, length);
  return true;
}

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
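// Called by the runtime's startup code, before main and before any
// sanitizer initialization that needs argv/environ or the main thread's
// stack bounds, to stash those values for the getters above.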
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA