//===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of the API functions.
//
//===----------------------------------------------------------------------===//

#include "xray_interface_internal.h"

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <errno.h>
#include <limits>
#include <sys/mman.h>

#include "sanitizer_common/sanitizer_common.h"

namespace __xray {

// This is the function to call when we encounter the entry or exit sleds.
std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction{nullptr};

// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will undo
// any successful mprotect(...) changes. It is used to make a page writeable
// and executable; upon destruction, if the change succeeded, it returns the
// page to being read-only and executable.
//
// This is only used for runtime-patching of the XRay instrumentation points,
// and assumes that the executable pages are originally read-and-execute only.
class MProtectHelper {
  void *PageAlignedAddr;
  std::size_t MProtectLen;
  bool MustCleanup;

public:
  explicit MProtectHelper(void *PageAlignedAddr, std::size_t MProtectLen)
      : PageAlignedAddr(PageAlignedAddr), MProtectLen(MProtectLen),
        MustCleanup(false) {}

  int MakeWriteable() {
    auto R = mprotect(PageAlignedAddr, MProtectLen,
                      PROT_READ | PROT_WRITE | PROT_EXEC);
    if (R != -1)
      MustCleanup = true;
    return R;
  }

  ~MProtectHelper() {
    if (MustCleanup) {
      mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
    }
  }
};
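
// Typical use, as in ControlPatching() below: make the pages covering a sled
// writeable for the duration of a patch, restoring read/execute on scope exit.
//
//   MProtectHelper Protector(PageAlignedAddr, MProtectLen);
//   if (Protector.MakeWriteable() == -1)
//     return XRayPatchingStatus::FAILED;
//   // ... write the patched instruction bytes ...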

} // namespace __xray

extern "C" {
// The following functions have to be defined in assembler, on a per-platform
// basis. See xray_trampoline_*.s files for implementations.
extern void __xray_FunctionEntry();
extern void __xray_FunctionExit();
}

extern std::atomic<bool> XRayInitialized;
extern std::atomic<__xray::XRaySledMap> XRayInstrMap;

int __xray_set_handler(void (*entry)(int32_t, XRayEntryType)) {
  if (XRayInitialized.load(std::memory_order_acquire)) {
    __xray::XRayPatchedFunction.store(entry, std::memory_order_release);
    return 1;
  }
  return 0;
}

int __xray_remove_handler() { return __xray_set_handler(nullptr); }
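
// A minimal sketch of how a client might install a handler (illustrative only;
// `MyHandler` is a hypothetical name, not part of this file):
//
//   void MyHandler(int32_t FuncId, XRayEntryType Type) {
//     // Record the function id and entry/exit type somewhere cheap.
//   }
//   ...
//   __xray_set_handler(MyHandler);   // returns 1 once XRay is initialized
//   ...
//   __xray_remove_handler();         // equivalent to passing nullptr
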
std::atomic<bool> XRayPatching{false};

using namespace __xray;

// FIXME: Figure out whether we can move this class to sanitizer_common instead
// as a generic "scope guard".
template <class Function> class CleanupInvoker {
  Function Fn;

public:
  explicit CleanupInvoker(Function Fn) : Fn(Fn) {}
  CleanupInvoker(const CleanupInvoker &) = default;
  CleanupInvoker(CleanupInvoker &&) = default;
  CleanupInvoker &operator=(const CleanupInvoker &) = delete;
  CleanupInvoker &operator=(CleanupInvoker &&) = delete;
  ~CleanupInvoker() { Fn(); }
};

template <class Function> CleanupInvoker<Function> ScopeCleanup(Function Fn) {
  return CleanupInvoker<Function>{Fn};
}
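
// Usage sketch (see ControlPatching() below): run a lambda when the enclosing
// scope exits, e.g. to reset the XRayPatching flag if patching bails out early.
//
//   auto Undo = ScopeCleanup([&] { /* undo partial work */ });
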
// ControlPatching implements the common internals of the patching/unpatching
// implementation. |Enable| defines whether we're enabling or disabling the
// runtime XRay instrumentation.
XRayPatchingStatus ControlPatching(bool Enable) {
  if (!XRayInitialized.load(std::memory_order_acquire))
    return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.

  static bool NotPatching = false;
  if (!XRayPatching.compare_exchange_strong(NotPatching, true,
                                            std::memory_order_acq_rel,
                                            std::memory_order_acquire)) {
    return XRayPatchingStatus::ONGOING; // Already patching.
  }

  bool PatchingSuccess = false;
  auto XRayPatchingStatusResetter = ScopeCleanup([&PatchingSuccess] {
    if (!PatchingSuccess) {
      XRayPatching.store(false, std::memory_order_release);
    }
  });

  // Step 1: Compute the function id, as a unique identifier per function in
  // the instrumentation map.
  XRaySledMap InstrMap = XRayInstrMap.load(std::memory_order_acquire);
  if (InstrMap.Entries == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  int32_t FuncId = 1;
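  // x86-64 byte sequences used when (un)patching the sleds. The 16-bit values
  // are stored little-endian, so 0xba41 emits the bytes 41 ba (mov r10d,
  // imm32) and 0x09eb emits eb 09 (short jmp +9). 0xe8 is call rel32, 0xe9 is
  // jmp rel32, and 0xc3 is ret.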
  static constexpr uint8_t CallOpCode = 0xe8;
  static constexpr uint16_t MovR10Seq = 0xba41;
  static constexpr uint16_t Jmp9Seq = 0x09eb;
  static constexpr uint8_t JmpOpCode = 0xe9;
  static constexpr uint8_t RetOpCode = 0xc3;
  uint64_t CurFun = 0;
  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
    auto Sled = InstrMap.Sleds[I];
    auto F = Sled.Function;
    if (CurFun == 0)
      CurFun = F;
    if (F != CurFun) {
      ++FuncId;
      CurFun = F;
    }
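    // Note: this function-id assignment assumes the sleds for a given function
    // appear contiguously in the instrumentation map; the id is bumped every
    // time the sled's function address changes.
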
    // While we're here, we should patch the nop sled. To do that we mprotect
    // the page containing the function to be writeable.
    void *PageAlignedAddr =
        reinterpret_cast<void *>(Sled.Address & ~((2 << 16) - 1));
    std::size_t MProtectLen =
        (Sled.Address + 12) - reinterpret_cast<uint64_t>(PageAlignedAddr);
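    // The mask above rounds Sled.Address down to a 128 KiB (2 << 16) boundary,
    // which is page-aligned on typical 4 KiB-page systems; MProtectLen then
    // extends the region through Sled.Address + 12, just past the patched sled.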
    MProtectHelper Protector(PageAlignedAddr, MProtectLen);
    if (Protector.MakeWriteable() == -1) {
      printf("Failed mprotect: %d\n", errno);
      return XRayPatchingStatus::FAILED;
    }

    static constexpr int64_t MinOffset{std::numeric_limits<int32_t>::min()};
    static constexpr int64_t MaxOffset{std::numeric_limits<int32_t>::max()};
    if (Sled.Kind == XRayEntryType::ENTRY) {
      // FIXME: Implement this in a more extensible manner, per-platform.
      // Here we do the dance of replacing the following sled:
      //
      //   xray_sled_n:
      //     jmp +9
      //     <9 byte nop>
      //
      // With the following:
      //
      //   mov r10d, <function id>
      //   call <relative 32-bit offset to entry trampoline>
      //
      // We need to do this in the following order:
      //
      // 1. Put the function id first, 2 bytes from the start of the sled (just
      //    after the 2-byte jmp instruction).
      // 2. Put the call opcode 6 bytes from the start of the sled.
      // 3. Put the relative offset 7 bytes from the start of the sled.
      // 4. Do an atomic write over the jmp instruction for the "mov r10d"
      //    opcode and first operand.
      //
      // Prerequisite is to compute the relative offset to the
      // __xray_FunctionEntry function's address.
      int64_t TrampolineOffset =
          reinterpret_cast<int64_t>(__xray_FunctionEntry) -
          (static_cast<int64_t>(Sled.Address) + 11);
      if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
        Report("XRay Entry trampoline (%p) too far from sled (%p); distance = "
               "%ld\n",
               __xray_FunctionEntry, reinterpret_cast<void *>(Sled.Address),
               TrampolineOffset);
        continue;
      }
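      // The writes below assemble the patched sled in place. Expected layout
      // (byte offsets from Sled.Address, operands little-endian):
      //
      //   +0: 41 ba <FuncId, 4 bytes>          mov r10d, <function id>
      //   +6: e8 <TrampolineOffset, 4 bytes>   call <entry trampoline>
      //
      // The call displacement is relative to the next instruction, which is
      // why TrampolineOffset is computed against Sled.Address + 11. The mov
      // bytes are stored last with an atomic write, so an executing thread
      // sees either the original short jmp (which skips the bytes still being
      // written) or the completed patch.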
      if (Enable) {
        *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
        *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
        *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
        std::atomic_store_explicit(
            reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
            std::memory_order_release);
      } else {
        std::atomic_store_explicit(
            reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
            std::memory_order_release);
        // FIXME: Write out the nops still?
      }
    }

    if (Sled.Kind == XRayEntryType::EXIT) {
      // FIXME: Implement this in a more extensible manner, per-platform.
      // Here we do the dance of replacing the following sled:
      //
      //   xray_sled_n:
      //     ret
      //     <10 byte nop>
      //
      // With the following:
      //
      //   mov r10d, <function id>
      //   jmp <relative 32-bit offset to exit trampoline>
      //
      // 1. Put the function id first, 2 bytes from the start of the sled (just
      //    after the 1-byte ret instruction).
      // 2. Put the jmp opcode 6 bytes from the start of the sled.
      // 3. Put the relative offset 7 bytes from the start of the sled.
      // 4. Do an atomic write over the ret instruction for the "mov r10d"
      //    opcode and first operand.
      //
      // Prerequisite is to compute the relative offset to the
      // __xray_FunctionExit function's address.
      int64_t TrampolineOffset =
          reinterpret_cast<int64_t>(__xray_FunctionExit) -
          (static_cast<int64_t>(Sled.Address) + 11);
      if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
        Report("XRay Exit trampoline (%p) too far from sled (%p); distance = "
               "%ld\n",
               __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address),
               TrampolineOffset);
        continue;
      }
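      // Expected layout after patching (byte offsets from Sled.Address):
      //
      //   +0: 41 ba <FuncId, 4 bytes>          mov r10d, <function id>
      //   +6: e9 <TrampolineOffset, 4 bytes>   jmp <exit trampoline>
      //
      // As with the entry sled, the displacement is relative to the next
      // instruction at Sled.Address + 11, and the mov bytes are written last
      // via an atomic store.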
      if (Enable) {
        *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
        *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
        *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
        std::atomic_store_explicit(
            reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
            std::memory_order_release);
      } else {
        std::atomic_store_explicit(
            reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
            std::memory_order_release);
        // FIXME: Write out the nops still?
      }
    }
  }
  XRayPatching.store(false, std::memory_order_release);
  PatchingSuccess = true;
  return XRayPatchingStatus::SUCCESS;
}

XRayPatchingStatus __xray_patch() { return ControlPatching(true); }

XRayPatchingStatus __xray_unpatch() { return ControlPatching(false); }
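
// A plausible end-to-end flow for a client of this API (a sketch, not a
// guaranteed contract): install a handler once XRay has initialized, patch the
// sleds, run the instrumented code, then undo both steps. `MyHandler` is a
// hypothetical client function.
//
//   __xray_set_handler(MyHandler);
//   if (__xray_patch() == XRayPatchingStatus::SUCCESS) {
//     // ... run workload; entry/exit sleds now dispatch to MyHandler ...
//     __xray_unpatch();
//   }
//   __xray_remove_handler();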