//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

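// Zircon has no dedicated yield syscall used here; a nanosleep with a
// deadline of 0 (already in the past) returns immediately and serves as a
// yield.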
uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

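// zx_clock_get already reports time in nanoseconds, so no conversion is
// needed: UTC for wall-clock time, the monotonic clock for intervals.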
u64 NanoTime() { return _zx_clock_get(ZX_CLOCK_UTC); }

u64 MonotonicNanoTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }

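// Zircon has no numeric PIDs in the POSIX sense; the koid (kernel object ID)
// of the process object stands in as the process identifier.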
uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE!  It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}

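// Futex-based lock: the fast path takes the mutex with a single exchange to
// MtxLocked.  On contention the state is set to MtxSleeping and the thread
// parks on the Zircon futex; ZX_ERR_BAD_STATE just means the futex word
// changed before the wait was queued (the normal lost race), so we loop.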
void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

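// Anonymous mmap is emulated by creating a VMO (virtual memory object),
// naming it after mem_type for diagnostics, and mapping it into the root
// VMAR.  The VMO handle can be closed right after mapping; the mapping keeps
// the pages alive.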
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

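// Reserves address space by allocating a child VMAR without mapping any
// pages; Map/MapOrDie below then place fixed-offset mappings inside it.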
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size,
                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                        &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

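// zx_vmar_map takes an offset relative to the VMAR base rather than an
// absolute address, so fixed_addr is translated to an offset and mapped with
// ZX_VM_FLAG_SPECIFIC.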
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status = _zx_vmar_map(
      vmar, offset, vmo, 0, map_size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

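// Only unmapping the whole reservation, or trimming it from either end, is
// supported; the CHECKs below enforce that.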
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  if (addr == reinterpret_cast<uptr>(base_))
    // If we unmap the whole range, just null out the base.
    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
  else
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  size_ -= size;
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size,
                 static_cast<zx_handle_t>(os_handle_));
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status =
            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                         &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

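// Probes readability by writing the candidate range into a scratch VMO:
// zx_vmo_write returns an error (rather than faulting the caller) if the
// source buffer is not readable.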
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}

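// There is no filesystem access here: __sanitizer_get_configuration hands
// back a VMO for the named "file", which is then mapped read-only, so *buff
// points at a mapping rather than heap memory.  On failure the zx_status_t
// is reported through errno_p.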
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                            ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  __sanitizer_log_write(buffer, internal_strlen(buffer));
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = StoredArgv[0];
  if (!argv0) argv0 = "<UNKNOWN>";
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

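// Draws from the kernel CPRNG.  A single zx_cprng_draw call returns at most
// ZX_CPRNG_DRAW_MAX_LEN bytes, hence the CHECK on length; the draw does not
// block, so the `blocking` argument is ignored.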
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  size_t size;
  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
  CHECK_EQ(size, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}

uptr GetRSS() { UNIMPLEMENTED(); }

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
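// Invoked by Fuchsia's process startup path to record argv, environ, and the
// main thread's stack bounds for later use by the sanitizer runtime.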
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA