//===-- xray_AArch64.cc -----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of AArch64-specific routines (64-bit).
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
#include "xray_emulate_tsc.h"
#include "xray_interface_internal.h"
#include <atomic>
#include <cassert>

namespace __xray {

uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
  // There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
  // not have a constant frequency like TSC on x86[_64]; it may run faster or
  // slower depending on the CPU's turbo or power-saving modes. Furthermore,
  // reading CP15 on ARM requires a kernel modification or a driver, and we
  // cannot require that from users of compiler-rt.
  // So on ARM we use clock_gettime(2), which gives the result in nanoseconds.
  // To get measurements per second, we scale this by the number of
  // nanoseconds per second, pretending that the TSC frequency is 1GHz and
  // one TSC tick is 1 nanosecond.
  return NanosecondsPerSecond;
}
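
// Since cycleFrequency() reports a nominal 1GHz, the difference between two
// emulated-TSC readings is already a count of nanoseconds. The helper below is
// an illustrative sketch, not part of the original file; the name
// durationInSeconds is hypothetical.
inline double durationInSeconds(uint64_t TSCStart,
                                uint64_t TSCEnd) XRAY_NEVER_INSTRUMENT {
  // Under the "1 tick == 1 ns" convention, the delta is in nanoseconds, so
  // dividing by the reported frequency converts it to seconds.
  return static_cast<double>(TSCEnd - TSCStart) /
         static_cast<double>(cycleFrequency());
}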

// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
  PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
  PO_LdrW0_12 = 0x18000060,        // LDR W0, #12
  PO_LdrX16_12 = 0x58000070,       // LDR X16, #12
  PO_BlrX16 = 0xD63F0200,          // BLR X16
  PO_LdpX0X30SP_16 = 0xA8C17BE0,   // LDP X0, X30, [SP], #16
  PO_B32 = 0x14000008              // B #32
};
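
// A minimal sketch (not part of the original file) of how the two LDR-literal
// opcodes above are formed, assuming the standard A64 literal encoding: the
// 19-bit PC-relative offset, expressed in 4-byte words, sits in bits [23:5]
// and the destination register number in bits [4:0]. Under those assumptions,
// encodeLdrLiteral(0x18000000, 0, 12) == PO_LdrW0_12 and
// encodeLdrLiteral(0x58000000, 16, 12) == PO_LdrX16_12.
constexpr uint32_t encodeLdrLiteral(uint32_t Base, uint32_t Rt,
                                    uint32_t ByteOffset) {
  return Base | ((ByteOffset / 4) << 5) | Rt;
}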

inline static bool patchSled(const bool Enable, const uint32_t FuncId,
                             const XRaySledEntry &Sled,
                             void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
  // When |Enable| == true, we replace the following compile-time stub (sled):
  //
  // xray_sled_n:
  //   B #32
  //   7 NOPs (28 bytes)
  //
  // with the following runtime patch:
  //
  // xray_sled_n:
  //   STP X0, X30, [SP, #-16]! ; PUSH {x0, lr}
  //   LDR W0, #12              ; W0  := function ID
  //   LDR X16, #12             ; X16 := address of the trampoline
  //   BLR X16
  //   ;DATA: 32 bits of function ID
  //   ;DATA: lower 32 bits of the address of the trampoline
  //   ;DATA: higher 32 bits of the address of the trampoline
  //   LDP X0, X30, [SP], #16   ; POP {x0, lr}
  //
  // Replacing the first 4-byte instruction must be the last step, and it must
  // be done atomically, so that user code reaching the sled concurrently
  // either jumps over the whole sled or executes the whole sled only once it
  // is fully in place.
  //
  // When |Enable| == false, we set the first instruction in the sled back to
  //   B #32

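  // For reference, the patched sled occupies 32 bytes with the byte layout
  // below (derived from the stores that follow; the LDR-literal offsets are
  // relative to the address of the LDR instruction itself):
  //
  //   +0   STP X0, X30, [SP, #-16]!
  //   +4   LDR W0, #12   ; loads the 32-bit function ID stored at +16
  //   +8   LDR X16, #12  ; loads the 64-bit trampoline address stored at +20
  //   +12  BLR X16
  //   +16  function ID (32 bits)
  //   +20  trampoline address (64 bits)
  //   +28  LDP X0, X30, [SP], #16
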
  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
  if (Enable) {
    uint32_t *CurAddress = FirstAddress + 1;
    *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
    CurAddress++;
    *CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
    CurAddress++;
    *CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
    CurAddress++;
    *CurAddress = FuncId;
    CurAddress++;
    *reinterpret_cast<void (**)()>(CurAddress) = TracingHook;
    CurAddress += 2;
    *CurAddress = uint32_t(PatchOpcodes::PO_LdpX0X30SP_16);
    std::atomic_store_explicit(
        reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
        uint32_t(PatchOpcodes::PO_StpX0X30SP_m16e), std::memory_order_release);
  } else {
    std::atomic_store_explicit(
        reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
        uint32_t(PatchOpcodes::PO_B32), std::memory_order_release);
  }
  return true;
}

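// The entry points below are not called directly by users. A typical flow
// (sketched here for orientation; the exact dispatch lives outside this file)
// is that an instrumented program registers a handler and asks the runtime to
// patch all sleds, e.g.:
//
//   __xray_set_handler(MyHandler);  // void MyHandler(int32_t, XRayEntryType)
//   __xray_patch();                 // walks the sled map and, on AArch64,
//                                   // reaches the per-sled routines below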
bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
                        const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  return patchSled(Enable, FuncId, Sled, __xray_FunctionEntry);
}

bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
                       const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
}

bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
                           const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  // FIXME: In the future we'd need to distinguish between non-tail exits and
  // tail exits for better information preservation.
  return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
}

} // namespace __xray