//===-- sanitizer_coverage.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage.
// This file implements run-time support for a poor man's coverage tool.
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
//   if (Guard < 0) {
//     __sanitizer_cov(&Guard);
//   }
// At module start-up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
//    and atomically set Guard to -Guard.
//  - __sanitizer_cov_dump: dump the coverage data to disk.
//    For every module of the current process that has coverage data
//    this will create a file module_name.PID.sancov. The file format is
//    simple: it's just a sorted sequence of 4-byte offsets in the module.
//
// Eventually, this coverage implementation should be obsoleted by a more
// powerful general-purpose Clang/LLVM coverage instrumentation.
// Consider this implementation a prototype.
//
// FIXME: support (or at least test with) dlclose.
//===----------------------------------------------------------------------===//
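
// A small worked illustration of the scheme above (a sketch, not real
// compiler output; the value -2 assumes this is the module's second guard):
//   s32 Guard;                  // Emitted by the compiler, initially 0.
//   ...                         // After __sanitizer_cov_module_init: -2.
//   if (Guard < 0)
//     __sanitizer_cov(&Guard);  // Records the caller PC and flips Guard
//                               // to +2, so later executions skip the call.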

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"

static atomic_uint32_t dump_once_guard;  // Ensure that CovDump runs only once.

static atomic_uintptr_t coverage_counter;

// pc_array is the array containing the covered PCs.
// To make the pc_array thread- and async-signal-safe it has to be large enough.
// 128M counters "ought to be enough for anybody" (4M on 32-bit).

// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
// dumps the current memory layout to another file.
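//
// A hedged usage sketch (flag spellings as in common_flags(); treat the
// exact option names as an assumption of this comment):
//   ASAN_OPTIONS=coverage=1,coverage_direct=1,coverage_dir=/tmp/cov ./a.out
// This produces /tmp/cov/<pid>.sancov.raw plus a .sancov.map file describing
// the memory layout (see CovUpdateMapping).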

static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;

namespace __sanitizer {

class CoverageData {
 public:
  void Init();
  void Enable();
  void Disable();
  void ReInit();
  void BeforeFork();
  void AfterFork(int child_pid);
  void Extend(uptr npcs);
  void Add(uptr pc, u32 *guard);
  void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
                 uptr cache_size);
  void DumpCallerCalleePairs();
  void DumpTrace();
  void DumpAsBitSet();

  ALWAYS_INLINE
  void TraceBasicBlock(s32 *id);

  void InitializeGuardArray(s32 *guards);
  void InitializeGuards(s32 *guards, uptr n, const char *module_name,
                        uptr caller_pc);
  void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
  void InitializeCounters(u8 *counters, uptr n);
  void ReinitializeGuards();
  uptr GetNumberOf8bitCounters();
  uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);

  uptr *data();
  uptr size();

 private:
  // Maximal size to which the pc array may ever grow.
  // We MmapNoReserve this space to ensure that the array is contiguous.
  static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 24, 1 << 27);
  // The amount by which the file mapping for the pc array is grown.
  static const uptr kPcArrayMmapSize = 64 * 1024;

  // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
  // much RAM as it really needs.
  uptr *pc_array;
  // Index of the first available pc_array slot.
  atomic_uintptr_t pc_array_index;
  // Array size.
  atomic_uintptr_t pc_array_size;
  // Current file mapped size of the pc array.
  uptr pc_array_mapped_size;
  // Descriptor of the file mapped pc array.
  int pc_fd;

  // Vector of coverage guard arrays, protected by mu.
  InternalMmapVectorNoCtor<s32*> guard_array_vec;

  struct NamedPcRange {
    const char *name;
    uptr beg, end;  // elements [beg,end) in pc_array.
  };

  // Vector of module and compilation unit pc ranges.
  InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
  InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;

  struct CounterAndSize {
    u8 *counters;
    uptr n;
  };

  InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
  uptr num_8bit_counters;

  // Caller-Callee (cc) array, size and current index.
  static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
  uptr **cc_array;
  atomic_uintptr_t cc_array_index;
  atomic_uintptr_t cc_array_size;

  // Tracing event array, size and current pointer.
  // We record all events (basic block entries) in a global buffer of u32
  // values. Each such value is the index in pc_array.
  // So far the tracing is highly experimental:
  //   - not thread-safe;
  //   - does not support long traces;
  //   - not tuned for performance.
  static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
  u32 *tr_event_array;
  uptr tr_event_array_size;
  u32 *tr_event_pointer;
  static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);

  StaticSpinMutex mu;

  void DirectOpen();
};

static CoverageData coverage_data;

void CovUpdateMapping(const char *path, uptr caller_pc = 0);

void CoverageData::DirectOpen() {
  InternalScopedString path(kMaxPathLength);
  internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
                    coverage_dir, internal_getpid());
  pc_fd = OpenFile(path.data(), true);
  if (internal_iserror(pc_fd)) {
    Report(" Coverage: failed to open %s for writing\n", path.data());
    Die();
  }

  pc_array_mapped_size = 0;
  CovUpdateMapping(coverage_dir);
}

void CoverageData::Init() {
  pc_fd = kInvalidFd;
}

void CoverageData::Enable() {
  if (pc_array)
    return;
  pc_array = reinterpret_cast<uptr *>(
      MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  if (common_flags()->coverage_direct) {
    atomic_store(&pc_array_size, 0, memory_order_relaxed);
  } else {
    atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
  }

  cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
      sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
  atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
  atomic_store(&cc_array_index, 0, memory_order_relaxed);

  // Allocate tr_event_array with a guard page at the end.
  tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
      sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
      "CovInit::tr_event_array"));
  Mprotect(reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
           GetMmapGranularity());
  tr_event_array_size = kTrEventArrayMaxSize;
  tr_event_pointer = tr_event_array;

  num_8bit_counters = 0;
}

void CoverageData::InitializeGuardArray(s32 *guards) {
  Enable();  // Make sure coverage is enabled at this point.
  s32 n = guards[0];
  for (s32 j = 1; j <= n; j++) {
    uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
    guards[j] = -static_cast<s32>(idx + 1);
  }
}

void CoverageData::Disable() {
  if (pc_array) {
    internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
    pc_array = nullptr;
  }
  if (cc_array) {
    internal_munmap(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
    cc_array = nullptr;
  }
  if (tr_event_array) {
    internal_munmap(tr_event_array,
                    sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
                        GetMmapGranularity());
    tr_event_array = nullptr;
    tr_event_pointer = nullptr;
  }
  if (pc_fd != kInvalidFd) {
    internal_close(pc_fd);
    pc_fd = kInvalidFd;
  }
}

void CoverageData::ReinitializeGuards() {
  // Assuming single thread.
  atomic_store(&pc_array_index, 0, memory_order_relaxed);
  for (uptr i = 0; i < guard_array_vec.size(); i++)
    InitializeGuardArray(guard_array_vec[i]);
}

void CoverageData::ReInit() {
  Disable();
  if (coverage_enabled) {
    if (common_flags()->coverage_direct) {
      // In memory-mapped mode we must extend the new file to the known array
      // size.
      uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
      Enable();
      if (size) Extend(size);
      if (coverage_enabled) CovUpdateMapping(coverage_dir);
    } else {
      Enable();
    }
  }
  // Re-initialize the guards.
  // We are single-threaded now, no need to grab any lock.
  CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
  ReinitializeGuards();
}

void CoverageData::BeforeFork() {
  mu.Lock();
}

void CoverageData::AfterFork(int child_pid) {
  // We are single-threaded so it's OK to release the lock early.
  mu.Unlock();
  if (child_pid == 0) ReInit();
}

// Extend coverage PC array to fit additional npcs elements.
void CoverageData::Extend(uptr npcs) {
  if (!common_flags()->coverage_direct) return;
  SpinMutexLock l(&mu);

  uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
  size += npcs * sizeof(uptr);

  if (coverage_enabled && size > pc_array_mapped_size) {
    if (pc_fd == kInvalidFd) DirectOpen();
    CHECK_NE(pc_fd, kInvalidFd);

    uptr new_mapped_size = pc_array_mapped_size;
    while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
    CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);

    // Extend the file and map the new space at the end of pc_array.
    uptr res = internal_ftruncate(pc_fd, new_mapped_size);
    int err;
    if (internal_iserror(res, &err)) {
      Printf("failed to extend raw coverage file: %d\n", err);
      Die();
    }

    uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
    void *p = MapWritableFileToMemory((void *)next_map_base,
                                      new_mapped_size - pc_array_mapped_size,
                                      pc_fd, pc_array_mapped_size);
    CHECK_EQ((uptr)p, next_map_base);
    pc_array_mapped_size = new_mapped_size;
  }

  atomic_store(&pc_array_size, size, memory_order_release);
}

void CoverageData::InitializeCounters(u8 *counters, uptr n) {
  if (!counters) return;
  CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
  n = RoundUpTo(n, 16);  // The compiler must ensure that counters is 16-aligned.
  SpinMutexLock l(&mu);
  counters_vec.push_back({counters, n});
  num_8bit_counters += n;
}

void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
                                       uptr range_end) {
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  const char *module_name = sym->GetModuleNameForPc(caller_pc);
  if (!module_name) return;
  if (module_name_vec.empty() || module_name_vec.back().name != module_name)
    module_name_vec.push_back({module_name, range_beg, range_end});
  else
    module_name_vec.back().end = range_end;
}

void CoverageData::InitializeGuards(s32 *guards, uptr n,
                                    const char *comp_unit_name,
                                    uptr caller_pc) {
  // The array 'guards' has n+1 elements, we use the element zero
  // to store 'n'.
  CHECK_LT(n, 1 << 30);
  guards[0] = static_cast<s32>(n);
  InitializeGuardArray(guards);
  SpinMutexLock l(&mu);
  uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
  uptr range_beg = range_end - n;
  comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
  guard_array_vec.push_back(guards);
  UpdateModuleNameVec(caller_pc, range_beg, range_end);
}
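
// Worked example (illustrative): for a compilation unit with n == 3 blocks
// registered first in the process, InitializeGuards() turns 'guards' into
// {3, -1, -2, -3}; the next unit would continue with {-4, -5, ...}.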

// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
  if (guard_value >= 0) return;

  atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
  if (!pc_array) return;

  uptr idx = -guard_value - 1;
  if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
    return;  // May happen after fork when pc_array_index becomes 0.
  CHECK_LT(idx * sizeof(uptr),
           atomic_load(&pc_array_size, memory_order_acquire));
  pc_array[idx] = pc;
  atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
}
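
// Continuing the illustration above: the first execution of the block owning
// guards[2] sees guard_value == -2, flips the guard to +2 and stores its PC
// at pc_array[1]; subsequent executions return early.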

// Registers a pair caller=>callee.
// When a given caller is seen for the first time, the callee_cache is added
// to the global array cc_array, callee_cache[0] is set to caller and
// callee_cache[1] is set to cache_size.
// Then we try to add callee to callee_cache[2, cache_size) if it is not
// there yet.
// If the cache is full we drop the callee (may want to fix this later).
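// Cache layout (illustrative): callee_cache == {caller, cache_size,
// callee_0, callee_1, ..., 0, ..., 0}, where unused slots remain zero.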
378void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
379 uptr cache_size) {
380 if (!cc_array) return;
381 atomic_uintptr_t *atomic_callee_cache =
382 reinterpret_cast<atomic_uintptr_t *>(callee_cache);
383 uptr zero = 0;
384 if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
385 memory_order_seq_cst)) {
386 uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
387 CHECK_LT(idx * sizeof(uptr),
388 atomic_load(&cc_array_size, memory_order_acquire));
389 callee_cache[1] = cache_size;
390 cc_array[idx] = callee_cache;
391 }
392 CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
393 for (uptr i = 2; i < cache_size; i++) {
394 uptr was = 0;
395 if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
Kostya Serebryany183cb6e2014-11-14 23:15:55 +0000396 memory_order_seq_cst)) {
397 atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
Kostya Serebryanyb6eae0c2014-10-31 17:19:11 +0000398 return;
Kostya Serebryany183cb6e2014-11-14 23:15:55 +0000399 }
Kostya Serebryanyb6eae0c2014-10-31 17:19:11 +0000400 if (was == callee) // Already have this callee.
401 return;
402 }
403}
404
Kostya Serebryanybe5e0ed2015-03-03 23:27:02 +0000405uptr CoverageData::GetNumberOf8bitCounters() {
406 return num_8bit_counters;
407}

// Map every 8-bit counter to an 8-bit bitset and clear the counter.
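// The mapping is logarithmic: each counter byte sets exactly one bit in the
// corresponding bitset byte (this table restates the if-chain below):
//   counter:  1   2   3   4..7   8..15   16..31   32..127   128..255
//   bit:      1   2   4   8      16      32       64        128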
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
  uptr num_new_bits = 0;
  uptr cur = 0;
  // For better speed we map 8 counters to 8 bytes of bitset at once.
  static const uptr kBatchSize = 8;
  CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
  for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
    u8 *c = counters_vec[i].counters;
    uptr n = counters_vec[i].n;
    CHECK_EQ(n % 16, 0);
    CHECK_EQ(cur % kBatchSize, 0);
    CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
    if (!bitset) {
      internal_bzero_aligned16(c, n);
      cur += n;
      continue;
    }
    for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
      CHECK_LT(cur, num_8bit_counters);
      u64 *pc64 = reinterpret_cast<u64*>(c + j);
      u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
      u64 c64 = *pc64;
      u64 old_bits_64 = *pb64;
      u64 new_bits_64 = old_bits_64;
      if (c64) {
        *pc64 = 0;
        for (uptr k = 0; k < kBatchSize; k++) {
          u64 x = (c64 >> (8 * k)) & 0xff;
          if (x) {
            u64 bit = 0;
            /**/ if (x >= 128) bit = 128;
            else if (x >= 32) bit = 64;
            else if (x >= 16) bit = 32;
            else if (x >= 8) bit = 16;
            else if (x >= 4) bit = 8;
            else if (x >= 3) bit = 4;
            else if (x >= 2) bit = 2;
            else if (x >= 1) bit = 1;
            u64 mask = bit << (8 * k);
            if (!(new_bits_64 & mask)) {
              num_new_bits++;
              new_bits_64 |= mask;
            }
          }
        }
        *pb64 = new_bits_64;
      }
    }
  }
  CHECK_EQ(cur, num_8bit_counters);
  return num_new_bits;
}

uptr *CoverageData::data() {
  return pc_array;
}

uptr CoverageData::size() {
  return atomic_load(&pc_array_index, memory_order_relaxed);
}

// Block layout for packed file format: header, followed by module name (no
// trailing zero), followed by data blob.
struct CovHeader {
  int pid;
  unsigned int module_name_length;
  unsigned int data_length;
};
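
// Byte layout of one packed block (illustrative):
//   [CovHeader: pid, module_name_length, data_length]
//   [module name: module_name_length bytes, no trailing zero]
//   [blob: data_length bytes]
// When writing to a size-limited socket, CovWritePacked() repeats this
// header+name prefix for each sub-block of the blob.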

static void CovWritePacked(int pid, const char *module, const void *blob,
                           unsigned int blob_size) {
  if (cov_fd < 0) return;
  unsigned module_name_length = internal_strlen(module);
  CovHeader header = {pid, module_name_length, blob_size};

  if (cov_max_block_size == 0) {
    // Writing to a file. Just go ahead.
    internal_write(cov_fd, &header, sizeof(header));
    internal_write(cov_fd, module, module_name_length);
    internal_write(cov_fd, blob, blob_size);
  } else {
    // Writing to a socket. We want to split the data into appropriately sized
    // blocks.
    InternalScopedBuffer<char> block(cov_max_block_size);
    CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
    uptr header_size_with_module = sizeof(header) + module_name_length;
    CHECK_LT(header_size_with_module, cov_max_block_size);
    unsigned int max_payload_size =
        cov_max_block_size - header_size_with_module;
    char *block_pos = block.data();
    internal_memcpy(block_pos, &header, sizeof(header));
    block_pos += sizeof(header);
    internal_memcpy(block_pos, module, module_name_length);
    block_pos += module_name_length;
    char *block_data_begin = block_pos;
    const char *blob_pos = (const char *)blob;
    while (blob_size > 0) {
      unsigned int payload_size = Min(blob_size, max_payload_size);
      blob_size -= payload_size;
      internal_memcpy(block_data_begin, blob_pos, payload_size);
      blob_pos += payload_size;
      ((CovHeader *)block.data())->data_length = payload_size;
      internal_write(cov_fd, block.data(),
                     header_size_with_module + payload_size);
    }
  }
}

// If packed = false: <name>.<pid>.<sancov> (name = module name).
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static int CovOpenFile(bool packed, const char *name,
                       const char *extension = "sancov") {
  InternalScopedString path(kMaxPathLength);
  if (!packed) {
    CHECK(name);
    path.append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
                extension);
  } else {
    if (!name)
      path.append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
                  extension);
    else
      path.append("%s/%s.%s.packed", coverage_dir, name, extension);
  }
  uptr fd = OpenFile(path.data(), true);
  if (internal_iserror(fd)) {
    Report(" SanitizerCoverage: failed to open %s for writing\n", path.data());
    return -1;
  }
  return fd;
}

// Dump trace PCs, compilation unit names, and trace events into three files.
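// Illustrative contents (module names and offsets invented):
//   trace-points:    one "module_name 0xoffset" text line per guard index,
//                    e.g. "libfoo.so 0x4b2a".
//   trace-compunits: one compilation unit name per line.
//   trace-events:    raw u32 indices (host byte order) into the trace-points
//                    list, in execution order.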
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(pc_array[i], &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  int fd = CovOpenFile(false, "trace-points");
  if (fd < 0) return;
  internal_write(fd, out.data(), out.length());
  internal_close(fd);

  fd = CovOpenFile(false, "trace-compunits");
  if (fd < 0) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].name);
  internal_write(fd, out.data(), out.length());
  internal_close(fd);

  fd = CovOpenFile(false, "trace-events");
  if (fd < 0) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written = internal_write(fd, event_bytes, bytes_to_write);
    if (actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  internal_close(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  int fd = CovOpenFile(false, "caller-callee");
  if (fd < 0) return;
  internal_write(fd, out.data(), out.length());
  internal_close(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}

// Append the current block's guard id to the trace event buffer.
// Every event is a u32 value: the guard id minus one, i.e. the index of the
// corresponding PC in pc_array.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
  // Will trap here if
  //   1. coverage is not enabled at run-time.
  //   2. The array tr_event_array is full.
  *tr_event_pointer = static_cast<u32>(*id - 1);
  tr_event_pointer++;
}

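// Dump coverage as an ASCII bitset: one <module>.<pid>.bitset-sancov file per
// module, with one character per guard: '1' if its PC was recorded, '0'
// otherwise.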
void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
  InternalScopedBuffer<char> out(size());
  for (uptr m = 0; m < module_name_vec.size(); m++) {
    uptr n_set_bits = 0;
    auto r = module_name_vec[m];
    CHECK(r.name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    for (uptr i = r.beg; i < r.end; i++) {
      uptr pc = data()[i];
      out[i] = pc ? '1' : '0';
      if (pc)
        n_set_bits++;
    }
    const char *base_name = StripModuleName(r.name);
    int fd = CovOpenFile(/* packed */ false, base_name, "bitset-sancov");
    if (fd < 0) return;
    internal_write(fd, out.data() + r.beg, r.end - r.beg);
    internal_close(fd);
    VReport(1,
            " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
            r.end - r.beg, base_name, n_set_bits);
  }
}

// Dump the coverage on disk.
static void CovDump() {
  if (!coverage_enabled || common_flags()->coverage_direct) return;
#if !SANITIZER_WINDOWS
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  coverage_data.DumpAsBitSet();
  coverage_data.DumpTrace();
  if (!common_flags()->coverage_pcs) return;
  uptr size = coverage_data.size();
  InternalMmapVector<u32> offsets(size);
  uptr *vb = coverage_data.data();
  uptr *ve = vb + size;
  SortArray(vb, size);
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr mb, me, off, prot;
  InternalScopedString module(kMaxPathLength);
  InternalScopedString path(kMaxPathLength);
  for (int i = 0;
       proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
       i++) {
    if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
      continue;
    while (vb < ve && *vb < mb) vb++;
    if (vb >= ve) break;
    if (*vb < me) {
      offsets.clear();
      const uptr *old_vb = vb;
      CHECK_LE(off, *vb);
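      // Turn absolute PCs into 32-bit file offsets; for the first mapping
      // (assumed to be the non-PIE main executable, i == 0) the PC is taken
      // as already file-relative, for other modules the base 'mb' is
      // subtracted.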
      for (; vb < ve && *vb < me; vb++) {
        uptr diff = *vb - (i ? mb : 0) + off;
        CHECK_LE(diff, 0xffffffffU);
        offsets.push_back(static_cast<u32>(diff));
      }
      const char *module_name = StripModuleName(module.data());
      if (cov_sandboxed) {
        if (cov_fd >= 0) {
          CovWritePacked(internal_getpid(), module_name, offsets.data(),
                         offsets.size() * sizeof(u32));
          VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
        }
      } else {
        // One file per module per process.
        path.clear();
        path.append("%s/%s.%zd.sancov", coverage_dir, module_name,
                    internal_getpid());
        int fd = CovOpenFile(false /* packed */, module_name);
        if (fd > 0) {
          internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
          internal_close(fd);
          VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
                  vb - old_vb);
        }
      }
    }
  }
  if (cov_fd >= 0)
    internal_close(cov_fd);
  coverage_data.DumpCallerCalleePairs();
#endif  // !SANITIZER_WINDOWS
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  if (!args) return;
  if (!coverage_enabled) return;
  cov_sandboxed = args->coverage_sandboxed;
  if (!cov_sandboxed) return;
  cov_fd = args->coverage_fd;
  cov_max_block_size = args->coverage_max_block_size;
  if (cov_fd < 0)
    // Pre-open the file now. The sandbox won't allow us to do it later.
    cov_fd = CovOpenFile(true /* packed */, 0);
}

int MaybeOpenCovFile(const char *name) {
  CHECK(name);
  if (!coverage_enabled) return -1;
  return CovOpenFile(true /* packed */, name);
}

void CovBeforeFork() {
  coverage_data.BeforeFork();
}

void CovAfterFork(int child_pid) {
  coverage_data.AfterFork(child_pid);
}

void InitializeCoverage(bool enabled, const char *dir) {
  if (coverage_enabled)
    return;  // May happen if two sanitizers enable coverage in one process.
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.Init();
  if (enabled) coverage_data.Enable();
#if !SANITIZER_WINDOWS
  if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
#endif
}

void ReInitializeCoverage(bool enabled, const char *dir) {
  coverage_enabled = enabled;
  coverage_dir = dir;
  coverage_data.ReInit();
}

void CoverageUpdateMapping() {
  if (coverage_enabled)
    CovUpdateMapping(coverage_dir);
}

}  // namespace __sanitizer

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (__sanitizer::atomic_load(atomic_guard, memory_order_relaxed))
    __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
  coverage_dir = common_flags()->coverage_dir;
  coverage_data.Init();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
    // Extend() calls to update .sancov.map.
    CovUpdateMapping(coverage_dir, GET_CALLER_PC());
  }
  coverage_data.Extend(npcs);
}
SANITIZER_INTERFACE_ATTRIBUTE
sptr __sanitizer_maybe_open_cov_file(const char *name) {
  return MaybeOpenCovFile(name);
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(s32 *id) {
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}
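
// A hedged client-side sketch of this interface (RunOneInput is a
// hypothetical test callback, not part of this file):
//   __sanitizer_reset_coverage();
//   RunOneInput(data, size);
//   uptr *guards;
//   uptr n = __sanitizer_get_coverage_guards(&guards);
//   // guards[i] != 0 iff basic block i has executed since the reset.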

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
}  // extern "C"