//===-- fuchsia.cpp ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "platform.h"

#if SCUDO_FUCHSIA

#include "common.h"
#include "mutex.h"
#include "string_utils.h"

#include <lib/sync/mutex.h> // for sync_mutex_t
#include <limits.h>         // for PAGE_SIZE
#include <stdlib.h>         // for getenv()
#include <zircon/compiler.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>

namespace scudo {

uptr getPageSize() { return PAGE_SIZE; }

void NORETURN die() { __builtin_trap(); }

// We zero-initialize the Extra parameter of map(); make sure this is
// consistent with ZX_HANDLE_INVALID.
COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
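// For reference, MapPlatformData on Fuchsia is assumed to carry roughly the
// following fields (a sketch, not the actual declaration, which lives in the
// platform headers):
//
//   struct MapPlatformData {
//     zx_handle_t Vmar;
//     zx_handle_t Vmo;
//     uintptr_t VmarBase;
//     uint64_t VmoSize;
//   };
//
// Zero-initializing such a struct leaves Vmar and Vmo equal to
// ZX_HANDLE_INVALID, which is what the check above guarantees.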

static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
  // Only scenario so far.
  DCHECK(Data);
  DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);

  const zx_status_t Status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      Size, &Data->Vmar, &Data->VmarBase);
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
    return nullptr;
  }
  return reinterpret_cast<void *>(Data->VmarBase);
}

void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          MapPlatformData *Data) {
  DCHECK_EQ(Size % PAGE_SIZE, 0);
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);

  // For MAP_NOACCESS, just allocate a Vmar and return.
  if (Flags & MAP_NOACCESS)
    return allocateVmar(Size, Data, AllowNoMem);

  const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
  CHECK_NE(Vmar, ZX_HANDLE_INVALID);

  zx_status_t Status;
  zx_handle_t Vmo;
  uint64_t VmoSize = 0;
  if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
    // If a Vmo was specified, it's a resize operation.
    CHECK(Addr);
    DCHECK(Flags & MAP_RESIZABLE);
    Vmo = Data->Vmo;
    VmoSize = Data->VmoSize;
    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
    if (Status != ZX_OK) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
      return nullptr;
    }
  } else {
    // Otherwise, create a Vmo and set its name.
    Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
    if (UNLIKELY(Status != ZX_OK)) {
      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
      return nullptr;
    }
    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
  }

  uintptr_t P;
  zx_vm_option_t MapFlags =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
  const uint64_t Offset =
      Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
  if (Offset)
    MapFlags |= ZX_VM_SPECIFIC;
  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
  // No need to track the Vmo if we don't intend to resize it. Close it.
  if (Flags & MAP_RESIZABLE) {
    DCHECK(Data);
    DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
    Data->Vmo = Vmo;
  } else {
    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
  }
  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
    return nullptr;
  }
  if (Data)
    Data->VmoSize += Size;

  return reinterpret_cast<void *>(P);
}
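
// A sketch of how a caller might drive this interface (names and sizes below
// are illustrative, not taken from this file): reserve an inaccessible region
// first, then commit growing, page-aligned pieces of it.
//
//   MapPlatformData Data = {};
//   // Reserve Capacity bytes of address space: allocates a Vmar and returns
//   // its base address without mapping anything into it.
//   void *Base = map(nullptr, Capacity, "scudo:example", MAP_NOACCESS, &Data);
//   // Commit an initial chunk inside the reservation; this creates a
//   // resizable Vmo that gets tracked in Data.
//   void *P = map(Base, ChunkSize, "scudo:example", MAP_RESIZABLE, &Data);
//   // Extend the committed range by another chunk right after the first one;
//   // the tracked Vmo is resized rather than recreated.
//   map(reinterpret_cast<char *>(P) + ChunkSize, ChunkSize, "scudo:example",
//       MAP_RESIZABLE, &Data);
//   // Release everything at once by destroying the Vmar.
//   unmap(Base, Capacity, UNMAP_ALL, &Data);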

void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
  if (Flags & UNMAP_ALL) {
    DCHECK_NE(Data, nullptr);
    const zx_handle_t Vmar = Data->Vmar;
    DCHECK_NE(Vmar, _zx_vmar_root_self());
    // Destroying the vmar effectively unmaps the whole mapping.
    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
  } else {
    const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
    const zx_status_t Status =
        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
    if (UNLIKELY(Status != ZX_OK))
      dieOnMapUnmapError();
  }
  if (Data) {
    if (Data->Vmo != ZX_HANDLE_INVALID)
      CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
    memset(Data, 0, sizeof(*Data));
  }
}

void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
                      MapPlatformData *Data) {
  DCHECK(Data);
  DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
  DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
  const zx_status_t Status =
      _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
  CHECK_EQ(Status, ZX_OK);
}

const char *getEnv(const char *Name) { return getenv(Name); }

// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
// because the Fuchsia implementation of sync_mutex_t has clang thread safety
// annotations. Were we to apply proper capability annotations to the top
// level HybridMutex class itself (as sketched below, after unlock()), they
// would not be needed. As it stands, the thread safety analysis thinks that
// we are locking the mutex and accidentally leaving it locked on the way out.
bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  // Size and alignment must be compatible between both types.
  return sync_mutex_trylock(&M) == ZX_OK;
}

void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_lock(&M);
}

void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
  sync_mutex_unlock(&M);
}
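
// For illustration only: roughly what the capability annotations alluded to
// above could look like if applied to HybridMutex itself. The attribute
// spellings are the standard clang thread safety ones; the class shown is a
// sketch, not the actual declaration in mutex.h.
//
//   class __attribute__((capability("mutex"))) HybridMutex {
//   public:
//     bool tryLock() __attribute__((try_acquire_capability(true)));
//     void lock() __attribute__((acquire_capability()));
//     void unlock() __attribute__((release_capability()));
//     ...
//   };
//
// With annotations like these in place, the method bodies above would no
// longer need __TA_NO_THREAD_SAFETY_ANALYSIS to silence the analysis.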

u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }

u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }

bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
  COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
    return false;
  _zx_cprng_draw(Buffer, Length);
  return true;
}

void outputRaw(const char *Buffer) {
  __sanitizer_log_write(Buffer, strlen(Buffer));
}

void setAbortMessage(const char *Message) {}

} // namespace scudo

#endif // SCUDO_FUCHSIA