Update aosp/master compiler-rt for rebase to r230699.
Change-Id: I6c415fd5f6420e3012d9da76719111721e906dfa
diff --git a/lib/Makefile.mk b/lib/Makefile.mk
index ed9690d..7eb6489 100644
--- a/lib/Makefile.mk
+++ b/lib/Makefile.mk
@@ -12,11 +12,8 @@
# Add submodules.
SubDirs += asan
SubDirs += builtins
-SubDirs += dfsan
SubDirs += interception
SubDirs += lsan
-SubDirs += msan
SubDirs += profile
SubDirs += sanitizer_common
-SubDirs += tsan
SubDirs += ubsan
diff --git a/lib/asan/Android.mk b/lib/asan/Android.mk
index 4a611d3..4314c02 100644
--- a/lib/asan/Android.mk
+++ b/lib/asan/Android.mk
@@ -23,8 +23,9 @@
asan_rtl_files := \
asan_activation.cc \
- asan_allocator2.cc \
+ asan_allocator.cc \
asan_fake_stack.cc \
+ asan_flags.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_linux.cc \
@@ -38,6 +39,7 @@
asan_rtl.cc \
asan_stack.cc \
asan_stats.cc \
+ asan_suppressions.cc \
asan_thread.cc \
asan_win.cc \
../interception/interception_linux.cc \
@@ -51,6 +53,7 @@
../sanitizer_common/sanitizer_deadlock_detector1.cc \
../sanitizer_common/sanitizer_deadlock_detector2.cc \
../sanitizer_common/sanitizer_flags.cc \
+ ../sanitizer_common/sanitizer_flag_parser.cc \
../sanitizer_common/sanitizer_libc.cc \
../sanitizer_common/sanitizer_libignore.cc \
../sanitizer_common/sanitizer_linux.cc \
@@ -182,6 +185,7 @@
external/compiler-rt/lib/asan/tests \
external/compiler-rt/lib/sanitizer_common/tests
LOCAL_CFLAGS += \
+ -Wno-non-virtual-dtor \
-Wno-unused-parameter \
-Wno-sign-compare \
-DASAN_UAR=0 \
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index 6251f06..90cb6f8 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -1,10 +1,11 @@
# Build for the AddressSanitizer runtime support library.
set(ASAN_SOURCES
- asan_allocator2.cc
+ asan_allocator.cc
asan_activation.cc
asan_debugging.cc
asan_fake_stack.cc
+ asan_flags.cc
asan_globals.cc
asan_interceptors.cc
asan_linux.cc
@@ -18,6 +19,7 @@
asan_rtl.cc
asan_stack.cc
asan_stats.cc
+ asan_suppressions.cc
asan_thread.cc
asan_win.cc)
@@ -29,10 +31,6 @@
include_directories(..)
-if(ANDROID)
- include_directories(${COMPILER_RT_EXTRA_ANDROID_HEADERS})
-endif()
-
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_no_rtti_flag(ASAN_CFLAGS)
@@ -67,8 +65,8 @@
add_compiler_rt_darwin_object_library(RTAsan ${os}
ARCH ${ASAN_SUPPORTED_ARCH}
SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
endforeach()
else()
foreach(arch ${ASAN_SUPPORTED_ARCH})
@@ -81,12 +79,10 @@
add_compiler_rt_object_library(RTAsan_preinit ${arch}
SOURCES ${ASAN_PREINIT_SOURCES} CFLAGS ${ASAN_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS})
- if (COMPILER_RT_BUILD_SHARED_ASAN)
- add_compiler_rt_object_library(RTAsan_dynamic ${arch}
- SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
- CFLAGS ${ASAN_DYNAMIC_CFLAGS}
- DEFS ${ASAN_DYNAMIC_DEFINITIONS})
- endif()
+ add_compiler_rt_object_library(RTAsan_dynamic ${arch}
+ SOURCES ${ASAN_SOURCES} ${ASAN_CXX_SOURCES}
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
endforeach()
endif()
@@ -100,8 +96,8 @@
$<TARGET_OBJECTS:RTInterception.${os}>
$<TARGET_OBJECTS:RTSanitizerCommon.${os}>
$<TARGET_OBJECTS:RTLSanCommon.${os}>
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
add_dependencies(asan clang_rt.asan_${os}_dynamic)
endforeach()
else()
@@ -131,30 +127,27 @@
DEFS ${ASAN_COMMON_DEFINITIONS})
add_dependencies(asan clang_rt.asan_cxx-${arch})
- if (COMPILER_RT_BUILD_SHARED_ASAN)
- add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
- SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
- CFLAGS ${ASAN_CFLAGS}
- DEFS ${ASAN_COMMON_DEFINITIONS})
- add_dependencies(asan clang_rt.asan-preinit-${arch})
+ add_compiler_rt_runtime(clang_rt.asan-preinit-${arch} ${arch} STATIC
+ SOURCES $<TARGET_OBJECTS:RTAsan_preinit.${arch}>
+ CFLAGS ${ASAN_CFLAGS}
+ DEFS ${ASAN_COMMON_DEFINITIONS})
+ add_dependencies(asan clang_rt.asan-preinit-${arch})
- if (WIN32)
- set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
- else()
- set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
- endif()
-
- add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
- OUTPUT_NAME ${SHARED_ASAN_NAME}
- SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
- ${ASAN_COMMON_RUNTIME_OBJECTS}
- CFLAGS ${ASAN_DYNAMIC_CFLAGS}
- DEFS ${ASAN_DYNAMIC_DEFINITIONS})
- target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
- add_dependencies(asan clang_rt.asan-dynamic-${arch})
+ if (WIN32)
+ set(SHARED_ASAN_NAME clang_rt.asan_dynamic-${arch}${COMPILER_RT_OS_SUFFIX})
+ else()
+ set(SHARED_ASAN_NAME clang_rt.asan-${arch}${COMPILER_RT_OS_SUFFIX})
endif()
+ add_compiler_rt_runtime(clang_rt.asan-dynamic-${arch} ${arch} SHARED
+ OUTPUT_NAME ${SHARED_ASAN_NAME}
+ SOURCES $<TARGET_OBJECTS:RTAsan_dynamic.${arch}>
+ ${ASAN_COMMON_RUNTIME_OBJECTS}
+ CFLAGS ${ASAN_DYNAMIC_CFLAGS}
+ DEFS ${ASAN_DYNAMIC_DEFINITIONS})
+ target_link_libraries(clang_rt.asan-dynamic-${arch} ${ASAN_DYNAMIC_LIBS})
+ add_dependencies(asan clang_rt.asan-dynamic-${arch})
- if (UNIX AND NOT ${arch} STREQUAL "i386" AND NOT ${arch} STREQUAL "i686")
+ if (UNIX AND NOT ${arch} MATCHES "i386|i686")
add_sanitizer_rt_symbols(clang_rt.asan_cxx-${arch})
add_dependencies(asan clang_rt.asan_cxx-${arch}-symbols)
add_sanitizer_rt_symbols(clang_rt.asan-${arch} asan.syms.extra)
diff --git a/lib/asan/README.txt b/lib/asan/README.txt
index b9c43ac..8cc9bb1 100644
--- a/lib/asan/README.txt
+++ b/lib/asan/README.txt
@@ -1,7 +1,6 @@
AddressSanitizer RT
================================
-This directory contains sources of the AddressSanitizer (asan) runtime library.
-We are in the process of integrating AddressSanitizer with LLVM, stay tuned.
+This directory contains sources of the AddressSanitizer (ASan) runtime library.
Directory structure:
README.txt : This file.
@@ -13,14 +12,13 @@
Also ASan runtime needs the following libraries:
lib/interception/ : Machinery used to intercept function calls.
-lib/sanitizer_common/ : Code shared between ASan and TSan.
+lib/sanitizer_common/ : Code shared between various sanitizers.
-Currently ASan runtime can be built by both make and cmake build systems.
-(see compiler-rt/make and files Makefile.mk for make-based build and
-files CMakeLists.txt for cmake-based build).
+ASan runtime currently also embeds part of LeakSanitizer runtime for
+leak detection (lib/lsan/lsan_common.{cc,h}).
-ASan unit and output tests work only with cmake. You may run this
-command from the root of your cmake build tree:
+ASan runtime can only be built by CMake. You can run ASan tests
+from the root of your CMake build tree:
make check-asan
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index 23273be..3bc0198 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -16,32 +16,106 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
- int quarantine_size;
- int max_redzone;
+ AllocatorOptions allocator_options;
int malloc_context_size;
bool poison_heap;
+ bool coverage;
+ const char *coverage_dir;
+
+ void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
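+ // Each entry in asan_activation_flags.inc expands through one of the two
+ // macros above, registering that flag with the parser against either the
+ // ASan flag struct (f) or the common flag struct (cf).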
+
+ RegisterIncludeFlag(parser, cf);
+ }
+
+ void OverrideFromActivationFlags() {
+ Flags f;
+ CommonFlags cf;
+ FlagParser parser;
+ RegisterActivationFlags(&parser, &f, &cf);
+
+ // Copy the current activation flags.
+ allocator_options.CopyTo(&f, &cf);
+ cf.malloc_context_size = malloc_context_size;
+ f.poison_heap = poison_heap;
+ cf.coverage = coverage;
+ cf.coverage_dir = coverage_dir;
+ cf.verbosity = Verbosity();
+ cf.help = false; // this is activation-specific help
+
+ // Check if activation flags need to be overridden.
+ if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+ parser.ParseString(env);
+ }
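+ // The option strings use the usual sanitizer "flag=value:flag=value"
+ // syntax, e.g. (hypothetical values) "quarantine_size_mb=16:poison_heap=1".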
+
+ // Override from getprop asan.options.
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ parser.ParseString(buf);
+
+ SetVerbosity(cf.verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (cf.help) parser.PrintFlagDescriptions();
+
+ allocator_options.SetFrom(&f, &cf);
+ malloc_context_size = cf.malloc_context_size;
+ poison_heap = f.poison_heap;
+ coverage = cf.coverage;
+ coverage_dir = cf.coverage_dir;
+ }
+
+ void Print() {
+ Report(
+ "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
+ "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ allocator_options.quarantine_size_mb, allocator_options.max_redzone,
+ poison_heap, malloc_context_size,
+ allocator_options.alloc_dealloc_mismatch,
+ allocator_options.may_return_null, coverage, coverage_dir);
+ }
} asan_deactivated_flags;
static bool asan_is_deactivated;
-void AsanStartDeactivated() {
+void AsanDeactivate() {
+ CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
- // Save flag values.
- asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
- asan_deactivated_flags.max_redzone = flags()->max_redzone;
- asan_deactivated_flags.poison_heap = flags()->poison_heap;
- asan_deactivated_flags.malloc_context_size =
- common_flags()->malloc_context_size;
- flags()->quarantine_size = 0;
- flags()->max_redzone = 16;
- flags()->poison_heap = false;
- common_flags()->malloc_context_size = 0;
+ // Stash runtime state.
+ GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+ asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+ asan_deactivated_flags.poison_heap = CanPoisonMemory();
+ asan_deactivated_flags.coverage = common_flags()->coverage;
+ asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+ // Deactivate the runtime.
+ SetCanPoisonMemory(false);
+ SetMallocContextSize(1);
+ ReInitializeCoverage(false, nullptr);
+
+ AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+ disabled.quarantine_size_mb = 0;
+ disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
+ disabled.max_redzone = 16;
+ disabled.alloc_dealloc_mismatch = false;
+ disabled.may_return_null = true;
+ ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}
@@ -50,25 +124,19 @@
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
- // Restore flag values.
- // FIXME: this is not atomic, and there may be other threads alive.
- flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
- flags()->max_redzone = asan_deactivated_flags.max_redzone;
- flags()->poison_heap = asan_deactivated_flags.poison_heap;
- common_flags()->malloc_context_size =
- asan_deactivated_flags.malloc_context_size;
+ asan_deactivated_flags.OverrideFromActivationFlags();
- ParseExtraActivationFlags();
-
- ReInitializeAllocator();
+ SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+ SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+ ReInitializeCoverage(asan_deactivated_flags.coverage,
+ asan_deactivated_flags.coverage_dir);
+ ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false;
- VReport(
- 1,
- "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size "
- "%d\n",
- flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
- common_flags()->malloc_context_size);
+ if (Verbosity()) {
+ Report("Activated with flags:\n");
+ asan_deactivated_flags.Print();
+ }
}
} // namespace __asan
diff --git a/lib/asan/asan_activation.h b/lib/asan/asan_activation.h
index dafb840..d5e1ce4 100644
--- a/lib/asan/asan_activation.h
+++ b/lib/asan/asan_activation.h
@@ -16,7 +16,7 @@
#define ASAN_ACTIVATION_H
namespace __asan {
-void AsanStartDeactivated();
+void AsanDeactivate();
void AsanActivate();
} // namespace __asan
diff --git a/lib/asan/asan_activation_flags.inc b/lib/asan/asan_activation_flags.inc
new file mode 100644
index 0000000..d4c089e
--- /dev/null
+++ b/lib/asan/asan_activation_flags.inc
@@ -0,0 +1,35 @@
+//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A subset of ASan (and common) runtime flags supported at activation time.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ACTIVATION_FLAG
+# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+#ifndef COMMON_ACTIVATION_FLAG
+# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+// ASAN_ACTIVATION_FLAG(Type, Name)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_ACTIVATION_FLAG(int, redzone)
+ASAN_ACTIVATION_FLAG(int, max_redzone)
+ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
+ASAN_ACTIVATION_FLAG(bool, poison_heap)
+
+COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
+COMMON_ACTIVATION_FLAG(int, malloc_context_size)
+COMMON_ACTIVATION_FLAG(bool, coverage)
+COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
+COMMON_ACTIVATION_FLAG(int, verbosity)
+COMMON_ACTIVATION_FLAG(bool, help)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
new file mode 100644
index 0000000..fd63ac6
--- /dev/null
+++ b/lib/asan/asan_allocator.cc
@@ -0,0 +1,909 @@
+//===-- asan_allocator.cc -------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, second version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: for larger allocations, larger redzones are used.
+static u32 RZLog2Size(u32 rz_log) {
+ CHECK_LT(rz_log, 8);
+ return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+ CHECK_GE(rz_size, 16);
+ CHECK_LE(rz_size, 2048);
+ CHECK(IsPowerOfTwo(rz_size));
+ u32 res = Log2(rz_size) - 4;
+ CHECK_EQ(rz_size, RZLog2Size(res));
+ return res;
+}
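+
+// Worked example of the mapping above: rz_log 0 -> 16 bytes, 1 -> 32, 2 -> 64,
+// ... 7 -> 2048. RZSize2Log() is the exact inverse, e.g. RZSize2Log(64) == 2,
+// so RZLog2Size(RZSize2Log(x)) == x for every valid redzone size x.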
+
+static AsanAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+// L -- left redzone words (0 or more bytes)
+// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// U -- user memory.
+// R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If the left redzone is greater than the ChunkHeader size, we store a magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B L L L L L L L L L H H U U U U U U
+// | ^
+// ---------------------|
+// M -- magic value kAllocBegMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kAllocBegMagic = 0xCC6E96B9;
+
+struct ChunkHeader {
+ // First 8 bytes.
+ u32 chunk_state : 8; // Must be first.
+ u32 alloc_tid : 24;
+
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ u32 lsan_tag : 2;
+ // Second 8 bytes
+ // This field is used for small sizes. For large sizes it is equal to
+ // SizeClassMap::kMaxSize and the actual size is stored in the
+ // SecondaryAllocator's metadata.
+ u32 user_requested_size;
+ u32 alloc_context_id;
+};
+
+struct ChunkBase : ChunkHeader {
+ // Header2, intersects with user memory.
+ u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
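+// These checks follow from the layout above: the two bit-field words plus
+// user_requested_size and alloc_context_id make the 16-byte ChunkHeader;
+// ChunkBase adds only free_context_id (4 bytes) on top of that.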
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+ CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
+ CHUNK_ALLOCATED = 2,
+ CHUNK_QUARANTINE = 3
+};
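+// Lifecycle in this file: Allocate() publishes CHUNK_ALLOCATED as its last
+// metadata write, AtomicallySetQuarantineFlag() flips it to CHUNK_QUARANTINE
+// on free, and QuarantineCallback::Recycle() resets it to CHUNK_AVAILABLE.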
+
+struct AsanChunk: ChunkBase {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize(bool locked_version = false) {
+ if (user_requested_size != SizeClassMap::kMaxSize)
+ return user_requested_size;
+ return *reinterpret_cast<uptr *>(
+ get_allocator().GetMetaData(AllocBeg(locked_version)));
+ }
+ void *AllocBeg(bool locked_version = false) {
+ if (from_memalign) {
+ if (locked_version)
+ return get_allocator().GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(this));
+ return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+ }
+ return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+ }
+ bool AddrIsInside(uptr addr, bool locked_version = false) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+ }
+};
+
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCache *cache)
+ : cache_(cache) {
+ }
+
+ void Recycle(AsanChunk *m) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+ CHECK_NE(m->alloc_tid, kInvalidTid);
+ CHECK_NE(m->free_tid, kInvalidTid);
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ void *p = reinterpret_cast<void *>(m->AllocBeg());
+ if (p != m) {
+ uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+ CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetAsanChunk lookup.
+ alloc_magic[0] = 0;
+ CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ }
+
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ void *Allocate(uptr size) {
+ return get_allocator().Allocate(cache_, size, 1, false);
+ }
+
+ void Deallocate(void *p) {
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ AllocatorCache *cache_;
+};
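+// The quarantine machinery from sanitizer_common drives this callback:
+// Recycle() is invoked when a chunk is evicted (the total quarantine size is
+// capped by quarantine_size_mb, see SharedInitCode() below), and
+// Allocate()/Deallocate() back the quarantine's own batch storage.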
+
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
+
+// We cannot use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ return &ms->allocator_cache;
+}
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+ return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+ quarantine_size_mb = f->quarantine_size_mb;
+ min_redzone = f->redzone;
+ max_redzone = f->max_redzone;
+ may_return_null = cf->allocator_may_return_null;
+ alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+ f->quarantine_size_mb = quarantine_size_mb;
+ f->redzone = min_redzone;
+ f->max_redzone = max_redzone;
+ cf->allocator_may_return_null = may_return_null;
+ f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+}
+
+struct Allocator {
+ static const uptr kMaxAllowedMallocSize =
+ FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
+ static const uptr kMaxThreadLocalQuarantine =
+ FIRST_32_SECOND_64(1 << 18, 1 << 20);
+
+ AsanAllocator allocator;
+ AsanQuarantine quarantine;
+ StaticSpinMutex fallback_mutex;
+ AllocatorCache fallback_allocator_cache;
+ QuarantineCache fallback_quarantine_cache;
+
+ // ------------------- Options --------------------------
+ atomic_uint16_t min_redzone;
+ atomic_uint16_t max_redzone;
+ atomic_uint8_t alloc_dealloc_mismatch;
+
+ // ------------------- Initialization ------------------------
+ explicit Allocator(LinkerInitialized)
+ : quarantine(LINKER_INITIALIZED),
+ fallback_quarantine_cache(LINKER_INITIALIZED) {}
+
+ void CheckOptions(const AllocatorOptions &options) const {
+ CHECK_GE(options.min_redzone, 16);
+ CHECK_GE(options.max_redzone, options.min_redzone);
+ CHECK_LE(options.max_redzone, 2048);
+ CHECK(IsPowerOfTwo(options.min_redzone));
+ CHECK(IsPowerOfTwo(options.max_redzone));
+ }
+
+ void SharedInitCode(const AllocatorOptions &options) {
+ CheckOptions(options);
+ quarantine.Init((uptr)options.quarantine_size_mb << 20,
+ kMaxThreadLocalQuarantine);
+ atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
+ memory_order_release);
+ atomic_store(&min_redzone, options.min_redzone, memory_order_release);
+ atomic_store(&max_redzone, options.max_redzone, memory_order_release);
+ }
+
+ void Initialize(const AllocatorOptions &options) {
+ allocator.Init(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void ReInitialize(const AllocatorOptions &options) {
+ allocator.SetMayReturnNull(options.may_return_null);
+ SharedInitCode(options);
+ }
+
+ void GetOptions(AllocatorOptions *options) const {
+ options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+ options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+ options->may_return_null = allocator.MayReturnNull();
+ options->alloc_dealloc_mismatch =
+ atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ }
+
+ // -------------------- Helper methods. -------------------------
+ uptr ComputeRZLog(uptr user_requested_size) {
+ u32 rz_log =
+ user_requested_size <= 64 - 16 ? 0 :
+ user_requested_size <= 128 - 32 ? 1 :
+ user_requested_size <= 512 - 64 ? 2 :
+ user_requested_size <= 4096 - 128 ? 3 :
+ user_requested_size <= (1 << 14) - 256 ? 4 :
+ user_requested_size <= (1 << 15) - 512 ? 5 :
+ user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+ u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
+ u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
+ return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ }
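+
+  // Example (before clamping): a 100-byte request misses the "<= 128 - 32"
+  // bucket but fits "<= 512 - 64", so rz_log == 2 and the redzone is 64
+  // bytes; the result is then clamped to the configured min/max redzone.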
+
+ // We have an address between two chunks, and we want to report just one.
+ AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
+ AsanChunk *right_chunk) {
+ // Prefer an allocated chunk over freed chunk and freed chunk
+ // over available chunk.
+ if (left_chunk->chunk_state != right_chunk->chunk_state) {
+ if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ return right_chunk;
+ if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ return right_chunk;
+ }
+ // Same chunk_state: choose based on offset.
+ sptr l_offset = 0, r_offset = 0;
+ CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+ CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+ if (l_offset < r_offset)
+ return left_chunk;
+ return right_chunk;
+ }
+
+ // -------------------- Allocation/Deallocation routines ---------------
+ void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+ AllocType alloc_type, bool can_fill) {
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
+ Flags &fl = *flags();
+ CHECK(stack);
+ const uptr min_alignment = SHADOW_GRANULARITY;
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc
+ // would not return NULL even for zero-size allocations. Moreover, it
+ // looks like operator new should never return NULL, and results of
+ // consecutive "new" calls must be different even if the allocated size
+ // is zero.
+ size = 1;
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rz_log = ComputeRZLog(size);
+ uptr rz_size = RZLog2Size(rz_log);
+ uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
+ uptr needed_size = rounded_size + rz_size;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ bool using_primary_allocator = true;
+ // If we are allocating from the secondary allocator, there will be no
+ // automatic right redzone, so add the right redzone manually.
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ needed_size += rz_size;
+ using_primary_allocator = false;
+ }
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
+ (void*)size);
+ return allocator.ReturnNullOrDie();
+ }
+
+ AsanThread *t = GetCurrentThread();
+ void *allocated;
+ bool check_rss_limit = true;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated =
+ allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ }
+
+ if (!allocated)
+ return allocator.ReturnNullOrDie();
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if CanPoisonMemory() was false for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_redzone = alloc_beg + rz_size;
+ uptr user_beg = beg_plus_redzone;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ m->alloc_type = alloc_type;
+ m->rz_log = rz_log;
+ u32 alloc_tid = t ? t->tid() : 0;
+ m->alloc_tid = alloc_tid;
+ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
+ m->free_tid = kInvalidTid;
+ m->from_memalign = user_beg != beg_plus_redzone;
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
+ reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
+ reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+ }
+ if (using_primary_allocator) {
+ CHECK(size);
+ m->user_requested_size = size;
+ CHECK(allocator.FromPrimary(allocated));
+ } else {
+ CHECK(!allocator.FromPrimary(allocated));
+ m->user_requested_size = SizeClassMap::kMaxSize;
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+ meta[0] = size;
+ meta[1] = chunk_beg;
+ }
+
+ m->alloc_context_id = StackDepotPut(*stack);
+
+ uptr size_rounded_down_to_granularity =
+ RoundDownTo(size, SHADOW_GRANULARITY);
+ // Unpoison the bulk of the memory region.
+ if (size_rounded_down_to_granularity)
+ PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+ // Deal with the end of the region if size is not aligned to granularity.
+ if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
+ u8 *shadow =
+ (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
+ *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ }
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_redzones += needed_size - size;
+ uptr class_id =
+ Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
+ thread_stats.malloced_by_size[class_id]++;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ if (can_fill && fl.max_malloc_fill_size) {
+ uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+ REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+ }
+#if CAN_SANITIZE_LEAKS
+ m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked;
+#endif
+ // Must be the last mutation of metadata in this function.
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ ASAN_MALLOC_HOOK(res, size);
+ return res;
+ }
+
+ void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+ BufferedStackTrace *stack) {
+ u8 old_chunk_state = CHUNK_ALLOCATED;
+ // Flip the chunk_state atomically to avoid race on double-free.
+ if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
+ CHUNK_QUARANTINE, memory_order_acquire))
+ ReportInvalidFree(ptr, old_chunk_state, stack);
+ CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ }
+
+ // Expects the chunk to already be marked as quarantined by using
+ // AtomicallySetQuarantineFlag.
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+
+ if (m->alloc_type != alloc_type) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+ (AllocType)alloc_type);
+ }
+ }
+
+ CHECK_GE(m->alloc_tid, 0);
+ if (SANITIZER_WORDSIZE == 64) // On 32-bit this resides in user area.
+ CHECK_EQ(m->free_tid, kInvalidTid);
+ AsanThread *t = GetCurrentThread();
+ m->free_tid = t ? t->tid() : 0;
+ m->free_context_id = StackDepotPut(*stack);
+ // Poison the region.
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
+ // Push into quarantine.
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
+ m->UsedSize());
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *ac = &fallback_allocator_cache;
+ quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
+ m->UsedSize());
+ }
+ }
+
+ void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0) return;
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
+ ASAN_FREE_HOOK(ptr);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ AtomicallySetQuarantineFlag(m, ptr, stack);
+ QuarantineChunk(m, ptr, stack, alloc_type);
+ }
+
+ void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
+ if (new_ptr) {
+ u8 chunk_state = m->chunk_state;
+ if (chunk_state != CHUNK_ALLOCATED)
+ ReportInvalidFree(old_ptr, chunk_state, stack);
+ CHECK_NE(REAL(memcpy), (void*)0);
+ uptr memcpy_size = Min(new_size, m->UsedSize());
+ // If realloc() races with free(), we may start copying freed memory.
+ // However, we will report racy double-free later anyway.
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+ }
+
+ void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+ return allocator.ReturnNullOrDie();
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+ // If the memory comes from the secondary allocator, there is no need to
+ // clear it, as it comes directly from mmap.
+ if (ptr && allocator.FromPrimary(ptr))
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+ }
+
+ void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+ if (chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ }
+
+ void CommitBack(AsanThreadLocalMallocStorage *ms) {
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
+ allocator.SwallowCache(ac);
+ }
+
+ // -------------------------- Chunk lookup ----------------------
+
+ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ AsanChunk *GetAsanChunk(void *alloc_beg) {
+ if (!alloc_beg) return 0;
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+ return m;
+ }
+ uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
+ if (alloc_magic[0] == kAllocBegMagic)
+ return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
+ return reinterpret_cast<AsanChunk *>(alloc_beg);
+ }
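+  // The cases above: secondary (mmap-backed) blocks store chunk_beg in their
+  // metadata word meta[1]; primary blocks with a large left redzone are marked
+  // with kAllocBegMagic followed by chunk_beg; otherwise the ChunkHeader sits
+  // directly at alloc_beg.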
+
+ AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ // Allocator must be locked when this function is called.
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ uptr AllocationSize(uptr p) {
+ AsanChunk *m = GetAsanChunkByAddr(p);
+ if (!m) return 0;
+ if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (m->Beg() != p) return 0;
+ return m->UsedSize();
+ }
+
+ AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ AsanChunk *m1 = GetAsanChunkByAddr(addr);
+ if (!m1) return AsanChunkView(m1);
+ sptr offset = 0;
+ if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ // The address is in the chunk's left redzone, so maybe it is actually
+ // a right buffer overflow from the other chunk to the left.
+ // Search a bit to the left to see if there is another chunk.
+ AsanChunk *m2 = 0;
+ for (uptr l = 1; l < GetPageSizeCached(); l++) {
+ m2 = GetAsanChunkByAddr(addr - l);
+ if (m2 == m1) continue; // Still the same chunk.
+ break;
+ }
+ if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+ m1 = ChooseChunk(addr, m2, m1);
+ }
+ return AsanChunkView(m1);
+ }
+
+ void PrintStats() {
+ allocator.PrintStats();
+ }
+
+ void ForceLock() {
+ allocator.ForceLock();
+ fallback_mutex.Lock();
+ }
+
+ void ForceUnlock() {
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
+ }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static AsanAllocator &get_allocator() {
+ return instance.allocator;
+}
+
+bool AsanChunkView::IsValid() {
+ return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
+uptr AsanChunkView::Beg() { return chunk_->Beg(); }
+uptr AsanChunkView::End() { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+StackTrace AsanChunkView::GetAllocStack() {
+ return GetStackTraceFromId(chunk_->alloc_context_id);
+}
+
+StackTrace AsanChunkView::GetFreeStack() {
+ return GetStackTraceFromId(chunk_->free_context_id);
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+ instance.Initialize(options);
+}
+
+void ReInitializeAllocator(const AllocatorOptions &options) {
+ instance.ReInitialize(options);
+}
+
+void GetAllocatorOptions(AllocatorOptions *options) {
+ instance.GetOptions(options);
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ return instance.FindHeapChunkByAddress(addr);
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+ instance.CommitBack(this);
+}
+
+void PrintInternalAllocatorStats() {
+ instance.PrintStats();
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ return instance.Allocate(size, alignment, stack, alloc_type, true);
+}
+
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ instance.Deallocate(ptr, size, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+}
+
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ return instance.Calloc(nmemb, size, stack);
+}
+
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+ if (p == 0)
+ return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+ if (size == 0) {
+ instance.Deallocate(p, 0, stack, FROM_MALLOC);
+ return 0;
+ }
+ return instance.Reallocate(p, size, stack);
+}
+
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
+ return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+}
+
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
+ if (size == 0) {
+ // pvalloc(0) should allocate one page.
+ size = PageSize;
+ }
+ return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+ if (ptr == 0) return 0;
+ uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ GET_STACK_TRACE_FATAL(pc, bp);
+ ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+}
+
+void asan_mz_force_lock() {
+ instance.ForceLock();
+}
+
+void asan_mz_force_unlock() {
+ instance.ForceUnlock();
+}
+
+void AsanSoftRssLimitExceededCallback(bool exceeded) {
+ instance.allocator.SetRssLimitIsExceeded(exceeded);
+}
+
+} // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+ __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+ __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__asan::get_allocator();
+ *end = *begin + sizeof(__asan::get_allocator());
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
+ if (!m) return 0;
+ uptr chunk = m->Beg();
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
+ CHECK(m);
+ return m->Beg();
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->UsedSize(/*locked_version=*/true);
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->alloc_context_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __asan::get_allocator().ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
+ if (!m) return kIgnoreObjectInvalid;
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// ASan allocator doesn't reserve extra bytes, so normally we would
+// just return "size". We don't want to expose our redzone sizes, etc. here.
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return instance.AllocationSize(ptr) > 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ if (p == 0) return 0;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ uptr allocated_size = instance.AllocationSize(ptr);
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
+ }
+ return allocated_size;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+} // extern "C"
+#endif
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 6d3a992..3208d1f 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -9,12 +9,13 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// ASan-private header for asan_allocator2.cc.
+// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H
+#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
@@ -31,8 +32,20 @@
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
-void InitializeAllocator();
-void ReInitializeAllocator();
+struct AllocatorOptions {
+ u32 quarantine_size_mb;
+ u16 min_redzone;
+ u16 max_redzone;
+ u8 may_return_null;
+ u8 alloc_dealloc_mismatch;
+
+ void SetFrom(const Flags *f, const CommonFlags *cf);
+ void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
@@ -127,12 +140,12 @@
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
+ SecondaryAllocator> AsanAllocator;
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
- AllocatorCache allocator2_cache;
+ AllocatorCache allocator_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
@@ -160,6 +173,7 @@
void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
deleted file mode 100644
index 52bdcf6..0000000
--- a/lib/asan/asan_allocator2.cc
+++ /dev/null
@@ -1,792 +0,0 @@
-//===-- asan_allocator2.cc ------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Implementation of ASan's memory allocator, 2-nd version.
-// This variant uses the allocator from sanitizer_common, i.e. the one shared
-// with ThreadSanitizer and MemorySanitizer.
-//
-//===----------------------------------------------------------------------===//
-#include "asan_allocator.h"
-
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
-
-namespace __asan {
-
-void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
- PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mmaps++;
- thread_stats.mmaped += size;
-}
-void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
- PoisonShadow(p, size, 0);
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- FlushUnneededASanShadowMemory(p, size);
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.munmaps++;
- thread_stats.munmaped += size;
-}
-
-// We can not use THREADLOCAL because it is not supported on some of the
-// platforms we care about (OSX 10.6, Android).
-// static THREADLOCAL AllocatorCache cache;
-AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- return &ms->allocator2_cache;
-}
-
-static Allocator allocator;
-
-static const uptr kMaxAllowedMallocSize =
- FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
-
-static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
-
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
-enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
-};
-
-// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
-// We use adaptive redzones: for larger allocation larger redzones are used.
-static u32 RZLog2Size(u32 rz_log) {
- CHECK_LT(rz_log, 8);
- return 16 << rz_log;
-}
-
-static u32 RZSize2Log(u32 rz_size) {
- CHECK_GE(rz_size, 16);
- CHECK_LE(rz_size, 2048);
- CHECK(IsPowerOfTwo(rz_size));
- u32 res = Log2(rz_size) - 4;
- CHECK_EQ(rz_size, RZLog2Size(res));
- return res;
-}
-
-static uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
- RZSize2Log(flags()->max_redzone));
-}
-
-// The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
-// L -- left redzone words (0 or more bytes)
-// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
-// U -- user memory.
-// R -- right redzone (0 or more bytes)
-// ChunkBase consists of ChunkHeader and other bytes that overlap with user
-// memory.
-
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L L H H U U U U U U
-// | ^
-// ---------------------|
-// M -- magic value kAllocBegMagic
-// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size;
- u32 alloc_context_id;
-};
-
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
-};
-
-static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
-static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
-COMPILER_CHECK(kChunkHeaderSize == 16);
-COMPILER_CHECK(kChunkHeader2Size <= 16);
-
-struct AsanChunk: ChunkBase {
- uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- allocator.GetMetaData(AllocBeg(locked_version)));
- }
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return allocator.GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
- }
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
- }
-};
-
-bool AsanChunkView::IsValid() {
- return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
-}
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-
-static StackTrace GetStackTraceFromId(u32 id) {
- CHECK(id);
- StackTrace res = StackDepotGet(id);
- CHECK(res.trace);
- return res;
-}
-
-StackTrace AsanChunkView::GetAllocStack() {
- return GetStackTraceFromId(chunk_->alloc_context_id);
-}
-
-StackTrace AsanChunkView::GetFreeStack() {
- return GetStackTraceFromId(chunk_->free_context_id);
-}
-
-struct QuarantineCallback;
-typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
-typedef AsanQuarantine::Cache QuarantineCache;
-static AsanQuarantine quarantine(LINKER_INITIALIZED);
-static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
-static AllocatorCache fallback_allocator_cache;
-static SpinMutex fallback_mutex;
-
-QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
- CHECK(ms);
- CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
- return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
-}
-
-struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCache *cache)
- : cache_(cache) {
- }
-
- void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
- if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
- // Clear the magic value, as allocator internals may overwrite the
- // contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
- }
-
- // Statistics.
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.real_frees++;
- thread_stats.really_freed += m->UsedSize();
-
- allocator.Deallocate(cache_, p);
- }
-
- void *Allocate(uptr size) {
- return allocator.Allocate(cache_, size, 1, false);
- }
-
- void Deallocate(void *p) {
- allocator.Deallocate(cache_, p);
- }
-
- AllocatorCache *cache_;
-};
-
-void InitializeAllocator() {
- allocator.Init();
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-void ReInitializeAllocator() {
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
-}
-
-static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
- AllocType alloc_type, bool can_fill) {
- if (UNLIKELY(!asan_inited))
- AsanInitFromRtl();
- Flags &fl = *flags();
- CHECK(stack);
- const uptr min_alignment = SHADOW_GRANULARITY;
- if (alignment < min_alignment)
- alignment = min_alignment;
- if (size == 0) {
- // We'd be happy to avoid allocating memory for zero-size requests, but
- // some programs/tests depend on this behavior and assume that malloc would
- // not return NULL even for zero-size allocations. Moreover, it looks like
- // operator new should never return NULL, and results of consecutive "new"
- // calls must be different even if the allocated size is zero.
- size = 1;
- }
- CHECK(IsPowerOfTwo(alignment));
- uptr rz_log = ComputeRZLog(size);
- uptr rz_size = RZLog2Size(rz_log);
- uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
- uptr needed_size = rounded_size + rz_size;
- if (alignment > min_alignment)
- needed_size += alignment;
- bool using_primary_allocator = true;
- // If we are allocating from the secondary allocator, there will be no
- // automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
- needed_size += rz_size;
- using_primary_allocator = false;
- }
- CHECK(IsAligned(needed_size, min_alignment));
- if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
- Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
- (void*)size);
- return AllocatorReturnNull();
- }
-
- AsanThread *t = GetCurrentThread();
- void *allocated;
- if (t) {
- AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *cache = &fallback_allocator_cache;
- allocated = allocator.Allocate(cache, needed_size, 8, false);
- }
-
- if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
- // Heap poisoning is enabled, but the allocator provides an unpoisoned
- // chunk. This is possible if flags()->poison_heap was disabled for some
- // time, for example, due to flags()->start_disabled.
- // Anyway, poison the block before using it for anything else.
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
- PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
- }
-
- uptr alloc_beg = reinterpret_cast<uptr>(allocated);
- uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
- if (!IsAligned(user_beg, alignment))
- user_beg = RoundUpTo(user_beg, alignment);
- uptr user_end = user_beg + size;
- CHECK_LE(user_end, alloc_end);
- uptr chunk_beg = user_beg - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
-
- m->alloc_context_id = StackDepotPut(*stack);
-
- uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
- // Unpoison the bulk of the memory region.
- if (size_rounded_down_to_granularity)
- PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
- // Deal with the end of the region if size is not aligned to granularity.
- if (size != size_rounded_down_to_granularity && fl.poison_heap) {
- u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
- *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
- }
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.mallocs++;
- thread_stats.malloced += size;
- thread_stats.malloced_redzones += needed_size - size;
- uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
- thread_stats.malloced_by_size[class_id]++;
- if (needed_size > SizeClassMap::kMaxSize)
- thread_stats.malloc_large++;
-
- void *res = reinterpret_cast<void *>(user_beg);
- if (can_fill && fl.max_malloc_fill_size) {
- uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
- REAL(memset)(res, fl.malloc_fill_byte, fill_size);
- }
-#if CAN_SANITIZE_LEAKS
- m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
- : __lsan::kDirectlyLeaked;
-#endif
- // Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
- ASAN_MALLOC_HOOK(res, size);
- return res;
-}
-
-static void ReportInvalidFree(void *ptr, u8 chunk_state,
- BufferedStackTrace *stack) {
- if (chunk_state == CHUNK_QUARANTINE)
- ReportDoubleFree((uptr)ptr, stack);
- else
- ReportFreeNotMalloced((uptr)ptr, stack);
-}
-
-static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
- BufferedStackTrace *stack) {
- u8 old_chunk_state = CHUNK_ALLOCATED;
- // Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
- CHUNK_QUARANTINE, memory_order_acquire))
- ReportInvalidFree(ptr, old_chunk_state, stack);
- CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
-}
-
-// Expects the chunk to already be marked as quarantined by using
-// AtomicallySetQuarantineFlag.
-static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
- AllocType alloc_type) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-
- if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
- ReportAllocTypeMismatch((uptr)ptr, stack,
- (AllocType)m->alloc_type, (AllocType)alloc_type);
-
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
- AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(*stack);
- // Poison the region.
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapFreeMagic);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.frees++;
- thread_stats.freed += m->UsedSize();
-
- // Push into quarantine.
- if (t) {
- AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
- AllocatorCache *ac = GetAllocatorCache(ms);
- quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
- m, m->UsedSize());
- } else {
- SpinMutexLock l(&fallback_mutex);
- AllocatorCache *ac = &fallback_allocator_cache;
- quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
- m, m->UsedSize());
- }
-}
-
-static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- uptr p = reinterpret_cast<uptr>(ptr);
- if (p == 0) return;
-
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
- if (delete_size && flags()->new_delete_type_mismatch &&
- delete_size != m->UsedSize()) {
- ReportNewDeleteSizeMismatch(p, delete_size, stack);
- }
- ASAN_FREE_HOOK(ptr);
- // Must mark the chunk as quarantined before any changes to its metadata.
- AtomicallySetQuarantineFlag(m, ptr, stack);
- QuarantineChunk(m, ptr, stack, alloc_type);
-}
-
-static void *Reallocate(void *old_ptr, uptr new_size,
- BufferedStackTrace *stack) {
- CHECK(old_ptr && new_size);
- uptr p = reinterpret_cast<uptr>(old_ptr);
- uptr chunk_beg = p - kChunkHeaderSize;
- AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.reallocs++;
- thread_stats.realloced += new_size;
-
- void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
- if (new_ptr) {
- u8 chunk_state = m->chunk_state;
- if (chunk_state != CHUNK_ALLOCATED)
- ReportInvalidFree(old_ptr, chunk_state, stack);
- CHECK_NE(REAL(memcpy), (void*)0);
- uptr memcpy_size = Min(new_size, m->UsedSize());
- // If realloc() races with free(), we may start copying freed memory.
- // However, we will report racy double-free later anyway.
- REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, 0, stack, FROM_MALLOC);
- }
- return new_ptr;
-}
-
-// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
-static AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return 0;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
- }
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
-}
-
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
- void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-// Allocator must be locked when this function is called.
-static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
- void *alloc_beg =
- allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
- return GetAsanChunk(alloc_beg);
-}
-
-static uptr AllocationSize(uptr p) {
- AsanChunk *m = GetAsanChunkByAddr(p);
- if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
- if (m->Beg() != p) return 0;
- return m->UsedSize();
-}
-
-// We have an address between two chunks, and we want to report just one.
-AsanChunk *ChooseChunk(uptr addr,
- AsanChunk *left_chunk, AsanChunk *right_chunk) {
- // Prefer an allocated chunk over freed chunk and freed chunk
- // over available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
- return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
- return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
- return right_chunk;
- }
- // Same chunk_state: choose based on offset.
- sptr l_offset = 0, r_offset = 0;
- CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
- CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
- if (l_offset < r_offset)
- return left_chunk;
- return right_chunk;
-}
-
-AsanChunkView FindHeapChunkByAddress(uptr addr) {
- AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
- sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
- // The address is in the chunk's left redzone, so maybe it is actually
- // a right buffer overflow from the other chunk to the left.
- // Search a bit to the left to see if there is another chunk.
- AsanChunk *m2 = 0;
- for (uptr l = 1; l < GetPageSizeCached(); l++) {
- m2 = GetAsanChunkByAddr(addr - l);
- if (m2 == m1) continue; // Still the same chunk.
- break;
- }
- if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
- m1 = ChooseChunk(addr, m2, m1);
- }
- return AsanChunkView(m1);
-}
-
-void AsanThreadLocalMallocStorage::CommitBack() {
- AllocatorCache *ac = GetAllocatorCache(this);
- quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
- allocator.SwallowCache(GetAllocatorCache(this));
-}
-
-void PrintInternalAllocatorStats() {
- allocator.PrintStats();
-}
-
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- return Allocate(size, alignment, stack, alloc_type, true);
-}
-
-void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, 0, stack, alloc_type);
-}
-
-void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- Deallocate(ptr, size, stack, alloc_type);
-}
-
-void *asan_malloc(uptr size, BufferedStackTrace *stack) {
- return Allocate(size, 8, stack, FROM_MALLOC, true);
-}
-
-void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
- void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
- // If the memory comes from the secondary allocator no need to clear it
- // as it comes directly from mmap.
- if (ptr && allocator.FromPrimary(ptr))
- REAL(memset)(ptr, 0, nmemb * size);
- return ptr;
-}
-
-void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
- if (p == 0)
- return Allocate(size, 8, stack, FROM_MALLOC, true);
- if (size == 0) {
- Deallocate(p, 0, stack, FROM_MALLOC);
- return 0;
- }
- return Reallocate(p, size, stack);
-}
-
-void *asan_valloc(uptr size, BufferedStackTrace *stack) {
- return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
-}
-
-void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
- uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
- }
- return Allocate(size, PageSize, stack, FROM_MALLOC, true);
-}
-
-int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
- BufferedStackTrace *stack) {
- void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
- CHECK(IsAligned((uptr)ptr, alignment));
- *memptr = ptr;
- return 0;
-}
-
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
- if (ptr == 0) return 0;
- uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
- if (flags()->check_malloc_usable_size && (usable_size == 0)) {
- GET_STACK_TRACE_FATAL(pc, bp);
- ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
- }
- return usable_size;
-}
-
-uptr asan_mz_size(const void *ptr) {
- return AllocationSize(reinterpret_cast<uptr>(ptr));
-}
-
-void asan_mz_force_lock() {
- allocator.ForceLock();
- fallback_mutex.Lock();
-}
-
-void asan_mz_force_unlock() {
- fallback_mutex.Unlock();
- allocator.ForceUnlock();
-}
-
-} // namespace __asan
-
-// --- Implementation of LSan-specific functions --- {{{1
-namespace __lsan {
-void LockAllocator() {
- __asan::allocator.ForceLock();
-}
-
-void UnlockAllocator() {
- __asan::allocator.ForceUnlock();
-}
-
-void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
- *begin = (uptr)&__asan::allocator;
- *end = *begin + sizeof(__asan::allocator);
-}
-
-uptr PointsIntoChunk(void* p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if (m->chunk_state != __asan::CHUNK_ALLOCATED)
- return 0;
- if (m->AddrIsInside(addr, /*locked_version=*/true))
- return chunk;
- if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
- addr))
- return chunk;
- return 0;
-}
-
-uptr GetUserBegin(uptr chunk) {
- __asan::AsanChunk *m =
- __asan::GetAsanChunkByAddrFastLocked(chunk);
- CHECK(m);
- return m->Beg();
-}
-
-LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
-}
-
-bool LsanMetadata::allocated() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
-}
-
-ChunkTag LsanMetadata::tag() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return static_cast<ChunkTag>(m->lsan_tag);
-}
-
-void LsanMetadata::set_tag(ChunkTag value) {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- m->lsan_tag = value;
-}
-
-uptr LsanMetadata::requested_size() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
-}
-
-u32 LsanMetadata::stack_trace_id() const {
- __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
-}
-
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- __asan::allocator.ForEachChunk(callback, arg);
-}
-
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
- uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
- return kIgnoreObjectInvalid;
- }
-}
-} // namespace __lsan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan; // NOLINT
-
-// ASan allocator doesn't reserve extra bytes, so normally we would
-// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __sanitizer_get_estimated_allocated_size(uptr size) {
- return size;
-}
-
-int __sanitizer_get_ownership(const void *p) {
- uptr ptr = reinterpret_cast<uptr>(p);
- return (AllocationSize(ptr) > 0);
-}
-
-uptr __sanitizer_get_allocated_size(const void *p) {
- if (p == 0) return 0;
- uptr ptr = reinterpret_cast<uptr>(p);
- uptr allocated_size = AllocationSize(ptr);
- // Die if p is not malloced or if it is already freed.
- if (allocated_size == 0) {
- GET_STACK_TRACE_FATAL_HERE;
- ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
- }
- return allocated_size;
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
- (void)ptr;
-}
-} // extern "C"
-#endif
diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc
index 2b66dd5..6fc5b69 100644
--- a/lib/asan/asan_debugging.cc
+++ b/lib/asan/asan_debugging.cc
@@ -81,8 +81,8 @@
GetInfoForHeapAddress(addr, descr);
}
-uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
- bool alloc_stack) {
+static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+ bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc
index c7f13c7..bf4f1eb 100644
--- a/lib/asan/asan_fake_stack.cc
+++ b/lib/asan/asan_fake_stack.cc
@@ -27,8 +27,10 @@
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
- for (uptr i = 0; i < (1U << class_id); i++)
+ for (uptr i = 0; i < (1U << class_id); i++) {
shadow[i] = magic;
+ SanitizerBreakOptimization(0); // Make sure this does not become memset.
+ }
} else {
// The size class is too big, it's cheaper to poison only size bytes.
PoisonShadow(ptr, size, static_cast<u8>(magic));
@@ -58,7 +60,7 @@
void FakeStack::Destroy(int tid) {
PoisonAll(0);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
@@ -190,20 +192,19 @@
return GetFakeStack();
}
-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
- if (!fs) return real_stack;
+ if (!fs) return 0;
+ uptr local_stack;
+ uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
- if (!ff)
- return real_stack; // Out of fake stack, return the real one.
+ if (!ff) return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
- if (ptr == real_stack)
- return;
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
@@ -214,12 +215,12 @@
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
- return OnMalloc(class_id, size, real_stack); \
+ __asan_stack_malloc_##class_id(uptr size) { \
+ return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
- uptr ptr, uptr size, uptr real_stack) { \
- OnFree(ptr, class_id, size, real_stack); \
+ uptr ptr, uptr size) { \
+ OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
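
The fake-stack hunk above also changes the public contract of __asan_stack_malloc_<N>/__asan_stack_free_<N>: the callee no longer receives the real stack address, and a zero return now means "no fake frame available, keep using the real stack". A minimal hand-written sketch of how an instrumented frame might consume that contract (illustration only; the size-class/frame-size pairing is an assumption, not taken from this patch):

  // Illustration of the post-change __asan_stack_malloc_ contract.
  #include <alloca.h>
  #include <stdint.h>

  extern "C" uintptr_t __asan_stack_malloc_0(uintptr_t size);         // provided by the runtime
  extern "C" void __asan_stack_free_0(uintptr_t ptr, uintptr_t size); // provided by the runtime

  void frame_with_fake_stack() {
    const uintptr_t kFrameSize = 64;  // assumed frame size for class 0
    uintptr_t fake = __asan_stack_malloc_0(kFrameSize);
    // Zero now means "out of fake stack (or UAR disabled)": fall back to the real stack.
    void *frame = fake ? reinterpret_cast<void *>(fake) : alloca(kFrameSize);
    // ... locals would live inside |frame| ...
    if (fake) __asan_stack_free_0(fake, kFrameSize);
  }
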
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
new file mode 100644
index 0000000..efb7767
--- /dev/null
+++ b/lib/asan/asan_flags.cc
@@ -0,0 +1,160 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "asan_stack.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __asan {
+
+Flags asan_flags_dont_use_directly; // use via flags().
+
+static const char *MaybeCallAsanDefaultOptions() {
+ return (&__asan_default_options) ? __asan_default_options() : "";
+}
+
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
+#ifdef ASAN_DEFAULT_OPTIONS
+// Stringize the macro value.
+# define ASAN_STRINGIZE(x) #x
+# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
+ return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void InitializeFlags() {
+ // Set the default values and prepare for parsing ASan and common flags.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.detect_leaks = CAN_SANITIZE_LEAKS;
+ cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = kDefaultMallocContextSize;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser asan_parser;
+ RegisterAsanFlags(&asan_parser, f);
+ RegisterCommonFlags(&asan_parser);
+
+ // Set the default values and prepare for parsing LSan flags (which can also
+ // overwrite common flags).
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
+ // Override from ASan compile definition.
+ const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
+ asan_parser.ParseString(asan_compile_def);
+
+ // Override from user-specified string.
+ const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ asan_parser.ParseString(asan_default_options);
+
+ // Override from command line.
+ asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
+#endif
+
+ // Let activation flags override current settings. On Android they come
+ // from a system property. On other platforms this is no-op.
+ if (!flags()->start_deactivated) {
+ char buf[100];
+ GetExtraActivationFlags(buf, sizeof(buf));
+ asan_parser.ParseString(buf);
+ }
+
+ SetVerbosity(common_flags()->verbosity);
+
+ // TODO(eugenis): dump all flags at verbosity>=2?
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ // TODO(samsonov): print all of the flags (ASan, LSan, common).
+ asan_parser.PrintFlagDescriptions();
+ }
+
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
+ // Make "strict_init_order" imply "check_initialization_order".
+ // TODO(samsonov): Use a single runtime flag for an init-order checker.
+ if (f->strict_init_order) {
+ f->check_initialization_order = true;
+ }
+ CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+ CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
+ CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
+ CHECK(IsPowerOfTwo(f->redzone));
+ CHECK(IsPowerOfTwo(f->max_redzone));
+
+ // quarantine_size is deprecated but we still honor it.
+ // quarantine_size can not be used together with quarantine_size_mb.
+ if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
+ Report("%s: please use either 'quarantine_size' (deprecated) or "
+ "quarantine_size_mb, but not both\n", SanitizerToolName);
+ Die();
+ }
+ if (f->quarantine_size >= 0)
+ f->quarantine_size_mb = f->quarantine_size >> 20;
+ if (f->quarantine_size_mb < 0) {
+ const int kDefaultQuarantineSizeMb =
+ (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ f->quarantine_size_mb = kDefaultQuarantineSizeMb;
+ }
+}
+
+} // namespace __asan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __asan_default_options() { return ""; }
+} // extern "C"
+#endif
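
The override chain walked by the new InitializeFlags() is: built-in defaults, then the ASAN_DEFAULT_OPTIONS compile definition, then the __asan_default_options() callback, then the ASAN_OPTIONS environment variable (plus activation flags on Android). A short usage sketch for the callback step, with flag values picked arbitrarily for illustration:

  // Defining this (normally weak) hook in the instrumented program feeds
  // InitializeFlags() before ASAN_OPTIONS is parsed, so the environment
  // variable can still override these values.
  extern "C" const char *__asan_default_options() {
    return "quarantine_size_mb=64:detect_stack_use_after_return=1";
  }
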
diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h
index 3df4dd3..4935161 100644
--- a/lib/asan/asan_flags.h
+++ b/lib/asan/asan_flags.h
@@ -16,6 +16,7 @@
#define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in four ways:
// 1) initialized with default values at startup.
@@ -24,55 +25,24 @@
// 3) overriden from string returned by user-specified function
// __asan_default_options().
// 4) overriden from env variable ASAN_OPTIONS.
+// 5) overriden during ASan activation (for now used on Android only).
namespace __asan {
struct Flags {
- // Flag descriptions are in asan_rtl.cc.
- int quarantine_size;
- int redzone;
- int max_redzone;
- bool debug;
- int report_globals;
- bool check_initialization_order;
- bool replace_str;
- bool replace_intrin;
- bool mac_ignore_invalid_free;
- bool detect_stack_use_after_return;
- int min_uar_stack_size_log;
- int max_uar_stack_size_log;
- bool uar_noreserve;
- int max_malloc_fill_size, malloc_fill_byte;
- int exitcode;
- bool allow_user_poisoning;
- int sleep_before_dying;
- bool check_malloc_usable_size;
- bool unmap_shadow_on_exit;
- bool abort_on_error;
- bool print_stats;
- bool print_legend;
- bool atexit;
- bool allow_reexec;
- bool print_full_thread_history;
- bool poison_heap;
- bool poison_partial;
- bool poison_array_cookie;
- bool alloc_dealloc_mismatch;
- bool new_delete_type_mismatch;
- bool strict_memcmp;
- bool strict_init_order;
- bool start_deactivated;
- int detect_invalid_pointer_pairs;
- bool detect_container_overflow;
- int detect_odr_violation;
- bool dump_instruction_bytes;
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+
+ void SetDefaults();
};
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
-void InitializeFlags(Flags *f, const char *env);
+
+void InitializeFlags();
} // namespace __asan
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
new file mode 100644
index 0000000..53a8a40
--- /dev/null
+++ b/lib/asan/asan_flags.inc
@@ -0,0 +1,145 @@
+//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_FLAG
+# error "Define ASAN_FLAG prior to including this file!"
+#endif
+
+// ASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_FLAG(int, quarantine_size, -1,
+ "Deprecated, please use quarantine_size_mb.")
+ASAN_FLAG(int, quarantine_size_mb, -1,
+ "Size (in Mb) of quarantine used to detect use-after-free "
+ "errors. Lower value may reduce memory usage but increase the "
+ "chance of false negatives.")
+ASAN_FLAG(int, redzone, 16,
+ "Minimal size (in bytes) of redzones around heap objects. "
+ "Requirement: redzone >= 16, is a power of two.")
+ASAN_FLAG(int, max_redzone, 2048,
+ "Maximal size (in bytes) of redzones around heap objects.")
+ASAN_FLAG(
+ bool, debug, false,
+ "If set, prints some debugging information and does additional checks.")
+ASAN_FLAG(
+ int, report_globals, 1,
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).")
+ASAN_FLAG(bool, check_initialization_order, false,
+ "If set, attempts to catch initialization order issues.")
+ASAN_FLAG(
+ bool, replace_str, true,
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.")
+ASAN_FLAG(bool, replace_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
+ASAN_FLAG(bool, mac_ignore_invalid_free, false,
+ "Ignore invalid free() calls to work around some bugs. Used on OS X "
+ "only.")
+ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ "Enables stack-use-after-return checking at run-time.")
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ "Minimum fake stack size log.")
+ASAN_FLAG(int, max_uar_stack_size_log,
+ 20, // 1Mb per size class, i.e. ~11Mb per thread
+ "Maximum fake stack size log.")
+ASAN_FLAG(bool, uar_noreserve, false,
+ "Use mmap with 'noreserve' flag to allocate fake stack.")
+ASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, exitcode, ASAN_DEFAULT_FAILURE_EXITCODE,
+ "Override the program exit status if the tool found an error.")
+ASAN_FLAG(bool, allow_user_poisoning, true,
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.")
+ASAN_FLAG(
+ int, sleep_before_dying, 0,
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).")
+ASAN_FLAG(bool, check_malloc_usable_size, true,
+ "Allows the users to work around the bug in Nvidia drivers prior to "
+ "295.*.")
+ASAN_FLAG(bool, unmap_shadow_on_exit, false,
+ "If set, explicitly unmaps the (huge) shadow at exit.")
+ASAN_FLAG(
+ bool, abort_on_error, false,
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.")
+ASAN_FLAG(bool, print_stats, false,
+ "Print various statistics after printing an error message or if "
+ "atexit=1.")
+ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, atexit, false,
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.")
+ASAN_FLAG(
+ bool, print_full_thread_history, true,
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.")
+ASAN_FLAG(
+ bool, poison_heap, true,
+ "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+ "for benchmarking the allocator or instrumentator.")
+ASAN_FLAG(bool, poison_partial, true,
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.")
+ASAN_FLAG(bool, poison_array_cookie, true,
+ "Poison (or not) the array cookie after operator new[].")
+
+// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+// https://code.google.com/p/address-sanitizer/issues/detail?id=131
+// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+// TODO(glider,timurrrr): Fix known issues and enable this back.
+ASAN_FLAG(bool, alloc_dealloc_mismatch,
+ (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+ASAN_FLAG(bool, new_delete_type_mismatch, true,
+ "Report errors on mismatch betwen size of new and delete.")
+ASAN_FLAG(bool, strict_memcmp, true,
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.")
+ASAN_FLAG(
+ bool, strict_init_order, false,
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.")
+ASAN_FLAG(
+ bool, start_deactivated, false,
+ "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+ "poisoning) to reduce memory consumption as much as possible, and "
+ "restores them to original values when the first instrumented module is "
+ "loaded into the process. This is mainly intended to be used on "
+ "Android. ")
+ASAN_FLAG(
+ int, detect_invalid_pointer_pairs, 0,
+ "If non-zero, try to detect operations like <, <=, >, >= and - on "
+ "invalid pointer pairs (e.g. when pointers belong to different objects). "
+ "The bigger the value the harder we try.")
+ASAN_FLAG(
+ bool, detect_container_overflow, true,
+ "If true, honor the container overflow annotations. "
+ "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
+ASAN_FLAG(int, detect_odr_violation, 2,
+ "If >=2, detect violation of One-Definition-Rule (ODR); "
+ "If ==1, detect ODR-violation only if the two variables "
+ "have different sizes")
+ASAN_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
+ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
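
asan_flags.inc is consumed through an X-macro: each ASAN_FLAG(Type, Name, Default, Description) line is expanded several times under different ASAN_FLAG definitions (struct field in asan_flags.h, default assignment in SetDefaults(), parser registration in RegisterAsanFlags()). A self-contained toy version of the same pattern (demo flag names only; in the runtime the list lives in the .inc file rather than inline):

  #include <cstdio>

  // First expansion: declare one struct field per flag.
  #define DEMO_FLAG(Type, Name, Default, Desc) Type Name;
  struct DemoFlags {
    DEMO_FLAG(int, redzone, 16, "minimal redzone size")
    DEMO_FLAG(bool, poison_heap, true, "poison heap memory")
    void SetDefaults();
  };
  #undef DEMO_FLAG

  // Second expansion: assign the default value to each field.
  #define DEMO_FLAG(Type, Name, Default, Desc) Name = Default;
  void DemoFlags::SetDefaults() {
    DEMO_FLAG(int, redzone, 16, "minimal redzone size")
    DEMO_FLAG(bool, poison_heap, true, "poison heap memory")
  }
  #undef DEMO_FLAG

  int main() {
    DemoFlags f;
    f.SetDefaults();
    std::printf("redzone=%d poison_heap=%d\n", f.redzone, f.poison_heap);
    return 0;
  }
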
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index be111d4..853a181 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -18,6 +18,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
@@ -158,13 +159,14 @@
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
- (flags()->detect_odr_violation >= 2 || g->size != l->g->size))
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
}
- if (flags()->poison_heap)
+ if (CanPoisonMemory())
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g;
@@ -182,11 +184,13 @@
static void UnregisterGlobal(const Global *g) {
CHECK(asan_inited);
+ if (flags()->report_globals >= 2)
+ ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
- if (flags()->poison_heap)
+ if (CanPoisonMemory())
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
@@ -208,6 +212,20 @@
}
}
+#if SANITIZER_WINDOWS // Should only be called on Windows.
+SANITIZER_INTERFACE_ATTRIBUTE
+void UnregisterGlobalsInRange(void *beg, void *end) {
+ if (!flags()->report_globals)
+ return;
+ BlockingMutexLock lock(&mu_for_globals);
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ void *address = (void *)l->g->beg;
+ if (beg <= address && address < end)
+ UnregisterGlobal(l->g);
+ }
+}
+#endif
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -216,7 +234,7 @@
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
- GET_STACK_TRACE_FATAL_HERE;
+ GET_STACK_TRACE_MALLOC;
u32 stack_id = StackDepotPut(stack);
BlockingMutexLock lock(&mu_for_globals);
if (!global_registration_site_vector)
@@ -249,7 +267,7 @@
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
@@ -275,7 +293,7 @@
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
- !flags()->poison_heap)
+ !CanPoisonMemory())
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
diff --git a/lib/asan/asan_init_version.h b/lib/asan/asan_init_version.h
index 77aea81..6cf57c4 100644
--- a/lib/asan/asan_init_version.h
+++ b/lib/asan/asan_init_version.h
@@ -25,8 +25,10 @@
// contains the function PC as the 3-rd field (see
// DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global.
- #define __asan_init __asan_init_v4
- #define __asan_init_name "__asan_init_v4"
+ // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+ // __asan_stack_free_ functions.
+ #define __asan_init __asan_init_v5
+ #define __asan_init_name "__asan_init_v5"
}
#endif // ASAN_INIT_VERSION_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index deac034..df57696 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -20,6 +20,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_suppressions.h"
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@@ -34,12 +35,16 @@
return false;
}
+struct AsanInterceptorContext {
+ const char *interceptor_name;
+};
+
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
@@ -49,13 +54,26 @@
} \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
- GET_CURRENT_PC_BP_SP; \
- __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \
+ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
+ bool suppressed = false; \
+ if (_ctx) { \
+ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ } \
+ if (!suppressed) { \
+ GET_CURRENT_PC_BP_SP; \
+ __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \
+ } \
} \
} while (0)
-#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false)
-#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true)
+#define ASAN_READ_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, true)
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report error in this case.
@@ -113,20 +131,28 @@
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+ AsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx; \
+
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- ASAN_WRITE_RANGE(ptr, size)
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size)
+ ASAN_WRITE_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \
if (asan_init_is_running) \
return REAL(func)(__VA_ARGS__); \
- ctx = 0; \
- (void) ctx; \
if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
} while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
@@ -145,14 +171,23 @@
do { \
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+// Strict init-order checking is dlopen-hostile:
+// https://code.google.com/p/address-sanitizer/issues/detail?id=178
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ if (flags()->strict_init_order) { \
+ StopInitOrderChecking(); \
+ }
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
-#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
-#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s)
+// Syscall interceptors don't have contexts, we don't support suppressions
+// for them.
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
do { \
(void)(p); \
@@ -165,46 +200,79 @@
} while (false)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
+struct ThreadStartParam {
+ atomic_uintptr_t t;
+ atomic_uintptr_t is_registered;
+};
+
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+#if SANITIZER_WINDOWS
+ // FIXME: this is a bandaid fix for PR22025.
AsanThread *t = (AsanThread*)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+#else
+ ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+ AsanThread *t = nullptr;
+ while ((t = reinterpret_cast<AsanThread *>(
+ atomic_load(&param->t, memory_order_acquire))) == 0)
+ internal_sched_yield();
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), &param->is_registered);
+#endif
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
- // Strict init-order checking in thread-hostile.
+ // Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
int detached = 0;
if (attr != 0)
REAL(pthread_attr_getdetachstate)(attr, &detached);
-
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t = AsanThread::Create(start_routine, arg);
- CreateThreadContextArgs args = { t, &stack };
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
- return REAL(pthread_create)(thread, attr, asan_thread_start, t);
+ ThreadStartParam param;
+ atomic_store(&param.t, 0, memory_order_relaxed);
+ atomic_store(&param.is_registered, 0, memory_order_relaxed);
+ int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ if (result == 0) {
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+ // Wait until the AsanThread object is initialized and the ThreadRegistry
+ // entry is in "started" state. One reason for this is that after this
+ // interceptor exits, the child thread's stack may be the only thing holding
+ // the |arg| pointer. This may cause LSan to report a leak if leak checking
+ // happens at a point when the interceptor has already exited, but the stack
+ // range for the child thread is not yet known.
+ while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+ internal_sched_yield();
+ }
+ return result;
}
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
@@ -213,8 +281,7 @@
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!AsanInterceptsSignal(signum) ||
- common_flags()->allow_user_segv_handler) {
+ if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
@@ -325,14 +392,16 @@
}
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memcmp);
if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size);
ENSURE_ASAN_INITED();
if (flags()->replace_intrin) {
if (flags()->strict_memcmp) {
// Check the entire regions even if the first bytes of the buffers are
// different.
- ASAN_READ_RANGE(a1, size);
- ASAN_READ_RANGE(a2, size);
+ ASAN_READ_RANGE(ctx, a1, size);
+ ASAN_READ_RANGE(ctx, a2, size);
// Fallthrough to REAL(memcmp) below.
} else {
unsigned char c1 = 0, c2 = 0;
@@ -344,65 +413,81 @@
c2 = s2[i];
if (c1 != c2) break;
}
- ASAN_READ_RANGE(s1, Min(i + 1, size));
- ASAN_READ_RANGE(s2, Min(i + 1, size));
+ ASAN_READ_RANGE(ctx, s1, Min(i + 1, size));
+ ASAN_READ_RANGE(ctx, s2, Min(i + 1, size));
return CharCmp(c1, c2);
}
}
return REAL(memcmp(a1, a2, size));
}
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+
void *__asan_memcpy(void *to, const void *from, uptr size) {
- if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size);
- // memcpy is called during __asan_init() from the internals
- // of printf(...).
- if (asan_init_is_running) {
- return REAL(memcpy)(to, from, size);
- }
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- if (to != from) {
- // We do not treat memcpy with to==from as a bug.
- // See http://llvm.org/bugs/show_bug.cgi?id=11763.
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size);
- }
- ASAN_READ_RANGE(from, size);
- ASAN_WRITE_RANGE(to, size);
- }
- return REAL(memcpy)(to, from, size);
+ ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
void *__asan_memset(void *block, int c, uptr size) {
- if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size);
- // memset is called inside Printf.
- if (asan_init_is_running) {
- return REAL(memset)(block, c, size);
- }
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- ASAN_WRITE_RANGE(block, size);
- }
- return REAL(memset)(block, c, size);
+ ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
+ if (UNLIKELY(!asan_inited)) \
+ return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
void *__asan_memmove(void *to, const void *from, uptr size) {
- if (UNLIKELY(!asan_inited))
- return internal_memmove(to, from, size);
- ENSURE_ASAN_INITED();
- if (flags()->replace_intrin) {
- ASAN_READ_RANGE(from, size);
- ASAN_WRITE_RANGE(to, size);
- }
- return internal_memmove(to, from, size);
+ ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
- return __asan_memmove(to, from, size);
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove);
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
- return __asan_memcpy(to, from, size);
+ ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
@@ -410,15 +495,19 @@
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
- return __asan_memmove(to, from, size);
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- return __asan_memset(block, c, size);
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, memset);
+ ASAN_MEMSET_IMPL(ctx, block, c, size);
}
INTERCEPTOR(char*, strchr, const char *str, int c) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strchr);
if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
@@ -429,7 +518,7 @@
char *result = REAL(strchr)(str, c);
if (flags()->replace_str) {
uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1;
- ASAN_READ_RANGE(str, bytes_read);
+ ASAN_READ_RANGE(ctx, str, bytes_read);
}
return result;
}
@@ -451,13 +540,15 @@
// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_length = REAL(strlen)(from);
- ASAN_READ_RANGE(from, from_length + 1);
+ ASAN_READ_RANGE(ctx, from, from_length + 1);
uptr to_length = REAL(strlen)(to);
- ASAN_READ_RANGE(to, to_length);
- ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+ ASAN_READ_RANGE(ctx, to, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
// If the copying actually happens, the |from| string should not overlap
// with the resulting string starting at |to|, which has a length of
// to_length + from_length + 1.
@@ -470,14 +561,16 @@
}
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncat);
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
- ASAN_READ_RANGE(from, copy_length);
+ ASAN_READ_RANGE(ctx, from, copy_length);
uptr to_length = REAL(strlen)(to);
- ASAN_READ_RANGE(to, to_length);
- ASAN_WRITE_RANGE(to + to_length, from_length + 1);
+ ASAN_READ_RANGE(ctx, to, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
if (from_length > 0) {
CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
from, copy_length);
@@ -487,6 +580,8 @@
}
INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT
#if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
#endif
@@ -499,19 +594,21 @@
if (flags()->replace_str) {
uptr from_size = REAL(strlen)(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
- ASAN_READ_RANGE(from, from_size);
- ASAN_WRITE_RANGE(to, from_size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, from_size);
}
return REAL(strcpy)(to, from); // NOLINT
}
#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, length + 1);
+ ASAN_READ_RANGE(ctx, s, length + 1);
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
@@ -521,6 +618,8 @@
#endif
INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strlen);
if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
@@ -530,37 +629,43 @@
ENSURE_ASAN_INITED();
SIZE_T length = REAL(strlen)(s);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, length + 1);
+ ASAN_READ_RANGE(ctx, s, length + 1);
}
return length;
}
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
SIZE_T length = REAL(wcslen)(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
- ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
+ ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
}
return length;
}
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
ENSURE_ASAN_INITED();
if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
- ASAN_READ_RANGE(from, from_size);
- ASAN_WRITE_RANGE(to, size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, size);
}
return REAL(strncpy)(to, from, size);
}
#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
ENSURE_ASAN_INITED();
uptr length = REAL(strnlen)(s, maxlen);
if (flags()->replace_str) {
- ASAN_READ_RANGE(s, Min(length + 1, maxlen));
+ ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
}
return length;
}
@@ -585,6 +690,8 @@
INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtol);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtol)(nptr, endptr, base);
@@ -596,12 +703,14 @@
}
if (IsValidStrtolBase(base)) {
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
}
return result;
}
INTERCEPTOR(int, atoi, const char *nptr) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoi);
#if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
#endif
@@ -616,11 +725,13 @@
// different from int). So, we just imitate this behavior.
int result = REAL(strtol)(nptr, &real_endptr, 10);
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atol);
#if SANITIZER_MAC
if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
#endif
@@ -631,13 +742,15 @@
char *real_endptr;
long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(strtoll)(nptr, endptr, base);
@@ -652,12 +765,14 @@
// if base is valid.
if (IsValidStrtolBase(base)) {
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
}
return result;
}
INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoll);
ENSURE_ASAN_INITED();
if (!flags()->replace_str) {
return REAL(atoll)(nptr);
@@ -665,7 +780,7 @@
char *real_endptr;
long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
FixRealStrtolEndptr(nptr, &real_endptr);
- ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1);
+ ASAN_READ_RANGE(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
@@ -703,15 +818,16 @@
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD thr_flags, void* tid) {
- // Strict init-order checking in thread-hostile.
+ // Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t = AsanThread::Create(start_routine, arg);
- CreateThreadContextArgs args = { t, &stack };
+ // FIXME: The CreateThread interceptor is not the same as a pthread_create
+ // one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args);
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, thr_flags, tid);
}
@@ -797,6 +913,7 @@
// Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
ASAN_INTERCEPT_FUNC(pthread_create);
+ ASAN_INTERCEPT_FUNC(pthread_join);
#endif
// Intercept atexit function.
diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h
index edaf44d..ea7540f 100644
--- a/lib/asan/asan_interface_internal.h
+++ b/lib/asan/asan_interface_internal.h
@@ -9,8 +9,11 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
-// This header can be included by the instrumented program to fetch
-// data (mostly allocator statistics) from ASan runtime library.
+// This header declares the AddressSanitizer runtime interface functions.
+// The runtime library has to define these functions so the instrumented program
+// can call them.
+//
+// See also include/sanitizer/asan_interface.h
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERFACE_INTERNAL_H
#define ASAN_INTERFACE_INTERNAL_H
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index f9f9243..ffd3ff8 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -62,6 +62,21 @@
class AsanThread;
using __sanitizer::StackTrace;
+struct SignalContext {
+ void *context;
+ uptr addr;
+ uptr pc;
+ uptr sp;
+ uptr bp;
+
+ SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
+ context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
+ }
+
+ // Creates signal context in a platform-specific manner.
+ static SignalContext Create(void *siginfo, void *context);
+};
+
void AsanInitFromRtl();
// asan_rtl.cc
@@ -78,8 +93,8 @@
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
+void DisableReexec();
void MaybeReexec();
-bool AsanInterceptsSignal(int signum);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@@ -92,10 +107,10 @@
void AppendToErrorMessageBuffer(const char *buffer);
-void ParseExtraActivationFlags();
-
void *AsanDlSymNext(const char *sym);
+void ReserveShadowMemoryRange(uptr beg, uptr end);
+
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
@@ -136,6 +151,8 @@
const int kAsanInternalHeapMagic = 0xfe;
const int kAsanArrayCookieMagic = 0xac;
const int kAsanIntraObjectRedzone = 0xbb;
+const int kAsanAllocaLeftMagic = 0xca;
+const int kAsanAllocaRightMagic = 0xcb;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/lib/asan/asan_linux.cc b/lib/asan/asan_linux.cc
index fdd009c..8e8bafd 100644
--- a/lib/asan/asan_linux.cc
+++ b/lib/asan/asan_linux.cc
@@ -68,6 +68,10 @@
namespace __asan {
+void DisableReexec() {
+ // No need to re-exec on Linux.
+}
+
void MaybeReexec() {
// No need to re-exec on Linux.
}
@@ -220,10 +224,6 @@
#endif
}
-bool AsanInterceptsSignal(int signum) {
- return signum == SIGSEGV && common_flags()->handle_segv;
-}
-
void AsanPlatformThreadInit() {
// Nothing here for now.
}
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index 4014357..b353686 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -101,8 +101,15 @@
}
}
+static bool reexec_disabled = false;
+
+void DisableReexec() {
+ reexec_disabled = true;
+}
+
void MaybeReexec() {
- if (!flags()->allow_reexec) return;
+ if (reexec_disabled) return;
+
// Make sure the dynamic ASan runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves.
@@ -113,8 +120,10 @@
uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname);
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
if (!dyld_insert_libraries ||
- !REAL(strstr)(dyld_insert_libraries, StripModuleName(info.dli_fname))) {
+ !REAL(strstr)(dyld_insert_libraries, dylib_name)) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
char program_name[1024];
@@ -140,58 +149,74 @@
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n");
- VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
- } else {
- // DYLD_INSERT_LIBRARIES is set and contains the runtime library.
- if (old_env_len == fname_len) {
- // It's just the runtime library name - fine to unset the variable.
- LeakyResetEnv(kDyldInsertLibraries, NULL);
- } else {
- uptr env_name_len = internal_strlen(kDyldInsertLibraries);
- // Allocate memory to hold the previous env var name, its value, the '='
- // sign and the '\0' char.
- char *new_env = (char*)allocator_for_env.Allocate(
- old_env_len + 2 + env_name_len);
- CHECK(new_env);
- internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
- internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
- new_env[env_name_len] = '=';
- char *new_env_pos = new_env + env_name_len + 1;
- // Iterate over colon-separated pieces of |dyld_insert_libraries|.
- char *piece_start = dyld_insert_libraries;
- char *piece_end = NULL;
- char *old_env_end = dyld_insert_libraries + old_env_len;
- do {
- if (piece_start[0] == ':') piece_start++;
- piece_end = REAL(strchr)(piece_start, ':');
- if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
- if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
- uptr piece_len = piece_end - piece_start;
-
- // If the current piece isn't the runtime library name,
- // append it to new_env.
- if ((piece_len != fname_len) ||
- (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
- if (new_env_pos != new_env + env_name_len + 1) {
- new_env_pos[0] = ':';
- new_env_pos++;
- }
- internal_strncpy(new_env_pos, piece_start, piece_len);
- }
- // Move on to the next piece.
- new_env_pos += piece_len;
- piece_start = piece_end;
- } while (piece_start < old_env_end);
-
- // Can't use setenv() here, because it requires the allocator to be
- // initialized.
- // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
- // a separate function called after InitializeAllocator().
- LeakyResetEnv(kDyldInsertLibraries, new_env);
- }
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for ASan to work. ASan tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ CHECK("execv failed" && 0);
}
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = REAL(strchr)(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
+ } else {
+ filename_start = piece_start;
+ }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// No-op. Mac does not support static linkage anyway.
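
The rewritten MaybeReexec above now always filters the ASan dylib out of DYLD_INSERT_LIBRARIES once interceptors are installed, comparing only the file name of each colon-separated entry rather than the full path. A hedged standalone sketch of that filtering idea in plain C++ follows; the names are illustrative, and the real runtime avoids libc/STL and uses its internal_* string helpers instead:

  #include <iostream>
  #include <string>

  // Illustrative only: drop every entry of a colon-separated list whose file
  // name (the part after the last '/') matches |dylib_name|.
  static std::string FilterInsertLibraries(const std::string &env,
                                           const std::string &dylib_name) {
    std::string result;
    size_t pos = 0;
    while (pos <= env.size()) {
      size_t colon = env.find(':', pos);
      if (colon == std::string::npos) colon = env.size();
      std::string piece = env.substr(pos, colon - pos);
      size_t slash = piece.rfind('/');
      std::string base =
          (slash == std::string::npos) ? piece : piece.substr(slash + 1);
      if (!piece.empty() && base != dylib_name) {
        if (!result.empty()) result += ':';
        result += piece;
      }
      pos = colon + 1;
    }
    return result;  // An empty result means the variable can simply be unset.
  }

  int main() {
    std::cout << FilterInsertLibraries(
                     "/usr/lib/libfoo.dylib:/tmp/libclang_rt.asan_osx_dynamic.dylib",
                     "libclang_rt.asan_osx_dynamic.dylib")
              << "\n";  // Prints "/usr/lib/libfoo.dylib".
  }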
@@ -205,11 +230,6 @@
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
-bool AsanInterceptsSignal(int signum) {
- return (signum == SIGSEGV || signum == SIGBUS) &&
- common_flags()->handle_segv;
-}
-
void AsanPlatformThreadInit() {
}
@@ -264,9 +284,8 @@
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = GetCurrentThread();
if (!t) {
- t = AsanThread::Create(0, 0);
- CreateThreadContextArgs args = { t, stack };
- asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args);
+ t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
+ parent_tid, stack, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
@@ -313,7 +332,7 @@
dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
- if (common_flags()->verbosity >= 2) { \
+ if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
@@ -331,7 +350,7 @@
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
@@ -344,7 +363,7 @@
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
- if (common_flags()->verbosity >= 2) {
+ if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
@@ -374,13 +393,6 @@
work(); \
}
-// Forces the compiler to generate a frame pointer in the function.
-#define ENABLE_FRAME_POINTER \
- do { \
- volatile uptr enable_fp; \
- enable_fp = GET_CURRENT_FRAME(); \
- } while (0)
-
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
ENABLE_FRAME_POINTER;
@@ -404,6 +416,10 @@
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
diff --git a/lib/asan/asan_malloc_mac.cc b/lib/asan/asan_malloc_mac.cc
index 2ef4c77..d7a6307 100644
--- a/lib/asan/asan_malloc_mac.cc
+++ b/lib/asan/asan_malloc_mac.cc
@@ -90,9 +90,9 @@
ENSURE_ASAN_INITED();
// Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
size_t buflen = 6 + (name ? internal_strlen(name) : 0);
- InternalScopedBuffer<char> new_name(buflen);
+ InternalScopedString new_name(buflen);
if (name && zone->introspect == asan_zone.introspect) {
- internal_snprintf(new_name.data(), buflen, "asan-%s", name);
+ new_name.append("asan-%s", name);
name = new_name.data();
}
@@ -152,13 +152,17 @@
namespace {
-// TODO(glider): the mz_* functions should be united with the Linux wrappers,
-// as they are basically copied from there.
-size_t mz_size(malloc_zone_t* zone, const void* ptr) {
+// TODO(glider): the __asan_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
return asan_mz_size(ptr);
}
-void *mz_malloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
@@ -167,7 +171,9 @@
return asan_malloc(size, &stack);
}
-void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
@@ -183,7 +189,9 @@
return asan_calloc(nmemb, size, &stack);
}
-void *mz_valloc(malloc_zone_t *zone, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
@@ -210,11 +218,15 @@
}
// TODO(glider): the allocation callbacks need to be refactored.
-void mz_free(malloc_zone_t *zone, void *ptr) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
free_common(zone, ptr);
}
-void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
@@ -233,15 +245,16 @@
}
}
-void mz_destroy(malloc_zone_t* zone) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
- Report("mz_destroy() called -- ignoring\n");
+ Report("__asan_mz_destroy() called -- ignoring\n");
}
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
-void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
@@ -252,12 +265,12 @@
// This function is currently unused, and we build with -Werror.
#if 0
-void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
+void __asan_mz_free_definite_size(
+ malloc_zone_t* zone, void *ptr, size_t size) {
// TODO(glider): check that |size| is valid.
UNIMPLEMENTED();
}
#endif
-#endif
kern_return_t mi_enumerator(task_t task, void *,
unsigned type_mask, vm_address_t zone_address,
@@ -299,13 +312,10 @@
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
// UNIMPLEMENTED();
return false;
}
-#endif
} // unnamed namespace
@@ -324,32 +334,25 @@
asan_introspection.force_lock = &mi_force_lock;
asan_introspection.force_unlock = &mi_force_unlock;
asan_introspection.statistics = &mi_statistics;
+ asan_introspection.zone_locked = &mi_zone_locked;
internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
- // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
- asan_zone.version = 4;
+ // Use version 6 for OSX >= 10.6.
+ asan_zone.version = 6;
asan_zone.zone_name = "asan";
- asan_zone.size = &mz_size;
- asan_zone.malloc = &mz_malloc;
- asan_zone.calloc = &mz_calloc;
- asan_zone.valloc = &mz_valloc;
- asan_zone.free = &mz_free;
- asan_zone.realloc = &mz_realloc;
- asan_zone.destroy = &mz_destroy;
+ asan_zone.size = &__asan_mz_size;
+ asan_zone.malloc = &__asan_mz_malloc;
+ asan_zone.calloc = &__asan_mz_calloc;
+ asan_zone.valloc = &__asan_mz_valloc;
+ asan_zone.free = &__asan_mz_free;
+ asan_zone.realloc = &__asan_mz_realloc;
+ asan_zone.destroy = &__asan_mz_destroy;
asan_zone.batch_malloc = 0;
asan_zone.batch_free = 0;
- asan_zone.introspect = &asan_introspection;
-
- // from AvailabilityMacros.h
-#if defined(MAC_OS_X_VERSION_10_6) && \
- MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
- // Switch to version 6 on OSX 10.6 to support memalign.
- asan_zone.version = 6;
asan_zone.free_definite_size = 0;
- asan_zone.memalign = &mz_memalign;
- asan_introspection.zone_locked = &mi_zone_locked;
-#endif
+ asan_zone.memalign = &__asan_mz_memalign;
+ asan_zone.introspect = &asan_introspection;
// Register the ASan zone.
malloc_zone_register(&asan_zone);
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 2746754..5cb011d 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -59,13 +59,20 @@
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
-// Default Linux/MIPS mapping:
+// Default Linux/MIPS32 mapping:
// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
//
+// Default Linux/MIPS64 mapping:
+// || `[0x4000000000, 0xffffffffff]` || HighMem ||
+// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
+// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
+// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
+// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
+//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -79,6 +86,15 @@
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
+//
+// Default Windows/i386 mapping:
+// (the exact location of HighShadow/HighMem may vary depending
+// on WoW64, /LARGEADDRESSAWARE, etc).
+// || `[0x50000000, 0xffffffff]` || HighMem ||
+// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
+// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
+// || `[0x30000000, 0x35ffffff]` || LowShadow ||
+// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
@@ -87,10 +103,11 @@
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
@@ -101,12 +118,12 @@
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# elif SANITIZER_IOS
+# define SHADOW_OFFSET kIosShadowOffset32
+# elif SANITIZER_WINDOWS
+# define SHADOW_OFFSET kWindowsShadowOffset32
# else
-# if SANITIZER_IOS
-# define SHADOW_OFFSET kIosShadowOffset32
-# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
-# endif
+# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
# if defined(__aarch64__)
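
The new constants above plug into ASan's usual shadow mapping, shadow = (addr >> SHADOW_SCALE) + SHADOW_OFFSET; the MEM_TO_SHADOW macro itself is outside this hunk, so treat that formula as an assumption here. A quick standalone check that the default 32-bit parameters reproduce the LowShadow range documented in the table above:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kShadowScale = 3;            // kDefaultShadowScale
    const uint64_t kShadowOffset = 1ULL << 29;  // kDefaultShadowOffset32 == 0x20000000
    // LowMem is documented as [0x00000000, 0x1fffffff]; its shadow should land
    // exactly in the documented LowShadow range [0x20000000, 0x23ffffff].
    uint64_t low_mem_beg = 0x00000000, low_mem_end = 0x1fffffff;
    printf("shadow(low_mem_beg) = 0x%llx\n",
           (unsigned long long)((low_mem_beg >> kShadowScale) + kShadowOffset));
    printf("shadow(low_mem_end) = 0x%llx\n",
           (unsigned long long)((low_mem_end >> kShadowScale) + kShadowOffset));
  }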
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 1c6e92f..e2b1f4d 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -15,13 +15,24 @@
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
+static atomic_uint8_t can_poison_memory;
+
+void SetCanPoisonMemory(bool value) {
+ atomic_store(&can_poison_memory, value, memory_order_release);
+}
+
+bool CanPoisonMemory() {
+ return atomic_load(&can_poison_memory, memory_order_acquire);
+}
+
void PoisonShadow(uptr addr, uptr size, u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
@@ -34,7 +45,7 @@
uptr size,
uptr redzone_size,
u8 value) {
- if (!flags()->poison_heap) return;
+ if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@@ -63,10 +74,10 @@
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
- if (common_flags()->verbosity) {
+ if (Verbosity()) {
Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
poison ? "" : "un", ptr, end, size);
- if (common_flags()->verbosity >= 2)
+ if (Verbosity() >= 2)
PRINT_CURRENT_STACK();
}
CHECK(size);
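
The hunks above stop reading flags()->poison_heap at every poisoning site and instead consult an atomic can_poison_memory gate, so activation and deactivation (see the asan_rtl.cc changes later in this patch) can flip poisoning at runtime without touching the flag parser. A minimal sketch of the same store-release / load-acquire pattern in plain C++, assuming std::atomic where the runtime uses its own sanitizer_atomic primitives:

  #include <atomic>

  // Illustrative stand-in for the can_poison_memory gate; names are hypothetical.
  static std::atomic<bool> can_poison{false};

  void SetCanPoison(bool value) {
    // Publish the new setting; pairs with the acquire load below.
    can_poison.store(value, std::memory_order_release);
  }

  bool CanPoison() {
    return can_poison.load(std::memory_order_acquire);
  }

  void PoisonRange(void *addr, unsigned long size, unsigned char value) {
    if (!CanPoison()) return;  // cheap early-out while poisoning is deactivated
    // ... write |value| into the shadow bytes covering [addr, addr + size) ...
    (void)addr; (void)size; (void)value;
  }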
diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h
index feda1a9..3fc9464 100644
--- a/lib/asan/asan_poisoning.h
+++ b/lib/asan/asan_poisoning.h
@@ -19,6 +19,10 @@
namespace __asan {
+// Enable/disable memory poisoning.
+void SetCanPoisonMemory(bool value);
+bool CanPoisonMemory();
+
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
@@ -34,7 +38,7 @@
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
@@ -60,15 +64,14 @@
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
- void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
- CHECK_EQ(page_beg, res);
+ ReserveShadowMemoryRange(page_beg, page_end - 1);
}
}
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
- DCHECK(flags()->poison_heap);
+ DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
index c910e23..ad31458 100644
--- a/lib/asan/asan_posix.cc
+++ b/lib/asan/asan_posix.cc
@@ -32,19 +32,24 @@
namespace __asan {
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+ uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+ uptr pc, sp, bp;
+ GetPcSpBp(context, &pc, &sp, &bp);
+ return SignalContext(context, addr, pc, sp, bp);
+}
+
void AsanOnSIGSEGV(int, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
- uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using the bullet-proof write.
if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die();
- uptr pc, sp, bp;
- GetPcSpBp(context, &pc, &sp, &bp);
+ SignalContext sig = SignalContext::Create(siginfo, context);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
// probably a stack overflow.
- bool IsStackAccess = addr + 512 > sp && addr < sp + 0xFFFF;
+ bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
#if __powerpc__
// Large stack frames can be allocated with e.g.
@@ -53,8 +58,8 @@
// If the store faults then sp will not have been updated, so the test above
// will not work, because the fault address will be more than just "slightly"
// below sp.
- if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
- u32 inst = *(unsigned *)pc;
+ if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
+ u32 inst = *(unsigned *)sig.pc;
u32 ra = (inst >> 16) & 0x1F;
u32 opcd = inst >> 26;
u32 xo = (inst >> 1) & 0x3FF;
@@ -75,9 +80,9 @@
// then hitting the guard page or unmapped memory, like, for example,
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
- ReportStackOverflow(pc, sp, bp, context, addr);
+ ReportStackOverflow(sig);
else
- ReportSIGSEGV("SEGV", pc, sp, bp, context, addr);
+ ReportSIGSEGV("SEGV", sig);
}
// ---------------------- TSD ---------------- {{{1
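
With SignalContext in place, the POSIX handler above packs the fault address and registers into one object and hands it to the reporting functions instead of passing five loose arguments. A compressed sketch of the resulting flow, written against the ASan-internal declarations from this patch (so it is not standalone):

  // Sketch only: how a deadly-signal handler feeds the SignalContext-based API.
  void OnDeadlySignal(int /*signo*/, void *siginfo, void *context) {
    SignalContext sig = SignalContext::Create(siginfo, context);
    // Heuristic from asan_posix.cc: an access just around sp is treated as a
    // stack overflow, everything else as a plain SEGV.
    bool is_stack_access = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
    if (is_stack_access)
      ReportStackOverflow(sig);
    else
      ReportSIGSEGV("SEGV", sig);
  }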
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 2ca11a3..ad75262 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -53,7 +53,7 @@
buffer, remaining);
error_message_buffer[error_message_buffer_size - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message.
- error_message_buffer_pos += remaining > length ? length : remaining;
+ error_message_buffer_pos += Min(remaining, length);
}
}
@@ -87,6 +87,8 @@
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
@@ -173,6 +175,8 @@
PrintShadowByte(str, " Intra object redzone: ",
kAsanIntraObjectRedzone);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}
void MaybeDumpInstructionBytes(uptr pc) {
@@ -643,38 +647,37 @@
}
};
-void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
+void ReportStackOverflow(const SignalContext &sig) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: stack-overflow on address %p"
" (pc %p bp %p sp %p T%d)\n",
- (void *)addr, (void *)pc, (void *)bp, (void *)sp,
+ (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
- GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ GET_STACK_TRACE_SIGNAL(sig);
stack.Print();
ReportErrorSummary("stack-overflow", &stack);
}
-void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
- void *context, uptr addr) {
+void ReportSIGSEGV(const char *description, const SignalContext &sig) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s on unknown address %p"
" (pc %p bp %p sp %p T%d)\n",
- description, (void *)addr, (void *)pc, (void *)bp, (void *)sp,
- GetCurrentTidOrInvalid());
- if (pc < GetPageSizeCached()) {
+ description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
+ (void *)sig.sp, GetCurrentTidOrInvalid());
+ if (sig.pc < GetPageSizeCached()) {
Report("Hint: pc points to the zero page.\n");
}
Printf("%s", d.EndWarning());
- GET_STACK_TRACE_SIGNAL(pc, bp, context);
+ GET_STACK_TRACE_SIGNAL(sig);
stack.Print();
- MaybeDumpInstructionBytes(pc);
+ MaybeDumpInstructionBytes(sig.pc);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary("SEGV", &stack);
}
@@ -831,6 +834,9 @@
" old_mid : %p\n"
" new_mid : %p\n",
beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print();
ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
}
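
The extra alignment diagnostic above fires when the container annotation API is handed an unaligned beg. For reference, a hedged sketch of the public call this error path guards; the header and function are the public sanitizer interface, while GrowContainer and its arguments are made up:

  #include <sanitizer/common_interface_defs.h>

  // Keep ASan's container annotations in sync while a vector-like buffer grows
  // from old_mid to new_mid; |beg| must be aligned to the shadow granularity
  // (8 bytes by default), which is exactly what the new message reports on.
  void GrowContainer(const char *beg, const char *end,
                     const char *old_mid, const char *new_mid) {
    __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
  }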
@@ -934,6 +940,8 @@
void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
uptr access_size) {
+ ENABLE_FRAME_POINTER;
+
// Determine the error type.
const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
@@ -982,6 +990,10 @@
case kAsanIntraObjectRedzone:
bug_descr = "intra-object-overflow";
break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ break;
}
}
diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h
index fd65bad..029c914 100644
--- a/lib/asan/asan_report.h
+++ b/lib/asan/asan_report.h
@@ -52,10 +52,8 @@
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
-void NORETURN
- ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
-void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
- void *context, uptr addr);
+void NORETURN ReportStackOverflow(const SignalContext &sig);
+void NORETURN ReportSIGSEGV(const char *description, const SignalContext &sig);
void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index f87d84f..9126e71 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -21,6 +21,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
+#include "asan_suppressions.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -55,8 +56,6 @@
}
if (common_flags()->coverage)
__sanitizer_cov_dump();
- if (death_callback)
- death_callback();
if (flags()->abort_on_error)
Abort();
internal__exit(flags()->exitcode);
@@ -71,265 +70,9 @@
Die();
}
-// -------------------------- Flags ------------------------- {{{1
-static const int kDefaultMallocContextSize = 30;
-
-Flags asan_flags_dont_use_directly; // use via flags().
-
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
-static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
-#ifdef ASAN_DEFAULT_OPTIONS
-// Stringize the macro value.
-# define ASAN_STRINGIZE(x) #x
-# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
- return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
-#else
- return "";
-#endif
-}
-
-static void ParseFlagsFromString(Flags *f, const char *str) {
- CommonFlags *cf = common_flags();
- ParseCommonFlagsFromString(cf, str);
- CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
- // Please write meaningful flag descriptions when adding new flags.
- ParseFlag(str, &f->quarantine_size, "quarantine_size",
- "Size (in bytes) of quarantine used to detect use-after-free "
- "errors. Lower value may reduce memory usage but increase the "
- "chance of false negatives.");
- ParseFlag(str, &f->redzone, "redzone",
- "Minimal size (in bytes) of redzones around heap objects. "
- "Requirement: redzone >= 16, is a power of two.");
- ParseFlag(str, &f->max_redzone, "max_redzone",
- "Maximal size (in bytes) of redzones around heap objects.");
- CHECK_GE(f->redzone, 16);
- CHECK_GE(f->max_redzone, f->redzone);
- CHECK_LE(f->max_redzone, 2048);
- CHECK(IsPowerOfTwo(f->redzone));
- CHECK(IsPowerOfTwo(f->max_redzone));
-
- ParseFlag(str, &f->debug, "debug",
- "If set, prints some debugging information and does additional checks.");
- ParseFlag(str, &f->report_globals, "report_globals",
- "Controls the way to handle globals (0 - don't detect buffer overflow on "
- "globals, 1 - detect buffer overflow, 2 - print data about registered "
- "globals).");
-
- ParseFlag(str, &f->check_initialization_order,
- "check_initialization_order",
- "If set, attempts to catch initialization order issues.");
-
- ParseFlag(str, &f->replace_str, "replace_str",
- "If set, uses custom wrappers and replacements for libc string functions "
- "to find more errors.");
-
- ParseFlag(str, &f->replace_intrin, "replace_intrin",
- "If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
- ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
- "Ignore invalid free() calls to work around some bugs. Used on OS X "
- "only.");
- ParseFlag(str, &f->detect_stack_use_after_return,
- "detect_stack_use_after_return",
- "Enables stack-use-after-return checking at run-time.");
- ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
- "Minimum fake stack size log.");
- ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
- "Maximum fake stack size log.");
- ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
- "Use mmap with 'norserve' flag to allocate fake stack.");
- ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
- "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
- "bytes that will be filled with malloc_fill_byte on malloc.");
- ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
- "Value used to fill the newly allocated memory.");
- ParseFlag(str, &f->exitcode, "exitcode",
- "Override the program exit status if the tool found an error.");
- ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
- "If set, user may manually mark memory regions as poisoned or "
- "unpoisoned.");
- ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
- "Number of seconds to sleep between printing an error report and "
- "terminating the program. Useful for debugging purposes (e.g. when one "
- "needs to attach gdb).");
-
- ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
- "Allows the users to work around the bug in Nvidia drivers prior to "
- "295.*.");
-
- ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
- "If set, explicitly unmaps the (huge) shadow at exit.");
- ParseFlag(str, &f->abort_on_error, "abort_on_error",
- "If set, the tool calls abort() instead of _exit() after printing the "
- "error report.");
- ParseFlag(str, &f->print_stats, "print_stats",
- "Print various statistics after printing an error message or if "
- "atexit=1.");
- ParseFlag(str, &f->print_legend, "print_legend",
- "Print the legend for the shadow bytes.");
- ParseFlag(str, &f->atexit, "atexit",
- "If set, prints ASan exit stats even after program terminates "
- "successfully.");
-
- ParseFlag(str, &f->allow_reexec, "allow_reexec",
- "Allow the tool to re-exec the program. This may interfere badly with "
- "the debugger.");
-
- ParseFlag(str, &f->print_full_thread_history,
- "print_full_thread_history",
- "If set, prints thread creation stacks for the threads involved in the "
- "report and their ancestors up to the main thread.");
-
- ParseFlag(str, &f->poison_heap, "poison_heap",
- "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
- "for benchmarking the allocator or instrumentator.");
-
- ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
- "Poison (or not) the array cookie after operator new[].");
-
- ParseFlag(str, &f->poison_partial, "poison_partial",
- "If true, poison partially addressable 8-byte aligned words "
- "(default=true). This flag affects heap and global buffers, but not "
- "stack buffers.");
-
- ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
- "Report errors on malloc/delete, new/free, new/delete[], etc.");
-
- ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
- "Report errors on mismatch betwen size of new and delete.");
-
- ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
- "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
- "comparing p1 and p2.");
-
- ParseFlag(str, &f->strict_init_order, "strict_init_order",
- "If true, assume that dynamic initializers can never access globals from "
- "other modules, even if the latter are already initialized.");
-
- ParseFlag(str, &f->start_deactivated, "start_deactivated",
- "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
- "poisoning) to reduce memory consumption as much as possible, and "
- "restores them to original values when the first instrumented module is "
- "loaded into the process. This is mainly intended to be used on "
- "Android. ");
-
- ParseFlag(str, &f->detect_invalid_pointer_pairs,
- "detect_invalid_pointer_pairs",
- "If non-zero, try to detect operations like <, <=, >, >= and - on "
- "invalid pointer pairs (e.g. when pointers belong to different objects). "
- "The bigger the value the harder we try.");
-
- ParseFlag(str, &f->detect_container_overflow,
- "detect_container_overflow",
- "If true, honor the container overflow annotations. "
- "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
-
- ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
- "If >=2, detect violation of One-Definition-Rule (ODR); "
- "If ==1, detect ODR-violation only if the two variables "
- "have different sizes");
-
- ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
- "If true, dump 16 bytes starting at the instruction that caused SEGV");
-}
-
-void InitializeFlags(Flags *f, const char *env) {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->detect_leaks = CAN_SANITIZE_LEAKS;
- cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = kDefaultMallocContextSize;
- cf->intercept_tls_get_addr = true;
- cf->coverage = false;
-
- internal_memset(f, 0, sizeof(*f));
- f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
- f->redzone = 16;
- f->max_redzone = 2048;
- f->debug = false;
- f->report_globals = 1;
- f->check_initialization_order = false;
- f->replace_str = true;
- f->replace_intrin = true;
- f->mac_ignore_invalid_free = false;
- f->detect_stack_use_after_return = false; // Also needs the compiler flag.
- f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
- f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
- f->uar_noreserve = false;
- f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
- f->malloc_fill_byte = 0xbe;
- f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
- f->allow_user_poisoning = true;
- f->sleep_before_dying = 0;
- f->check_malloc_usable_size = true;
- f->unmap_shadow_on_exit = false;
- f->abort_on_error = false;
- f->print_stats = false;
- f->print_legend = true;
- f->atexit = false;
- f->allow_reexec = true;
- f->print_full_thread_history = true;
- f->poison_heap = true;
- f->poison_array_cookie = true;
- f->poison_partial = true;
- // Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
- // https://code.google.com/p/address-sanitizer/issues/detail?id=131
- // https://code.google.com/p/address-sanitizer/issues/detail?id=309
- // TODO(glider,timurrrr): Fix known issues and enable this back.
- f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
- f->new_delete_type_mismatch = true;
- f->strict_memcmp = true;
- f->strict_init_order = false;
- f->start_deactivated = false;
- f->detect_invalid_pointer_pairs = 0;
- f->detect_container_overflow = true;
- f->detect_odr_violation = 2;
- f->dump_instruction_bytes = false;
-
- // Override from compile definition.
- ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
-
- // Override from user-specified string.
- ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
- VReport(1, "Using the defaults from __asan_default_options: %s\n",
- MaybeCallAsanDefaultOptions());
-
- // Override from command line.
- ParseFlagsFromString(f, env);
- if (common_flags()->help) {
- PrintFlagDescriptions();
- }
-
- if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
- Report("%s: detect_leaks is not supported on this platform.\n",
- SanitizerToolName);
- cf->detect_leaks = false;
- }
-
- // Make "strict_init_order" imply "check_initialization_order".
- // TODO(samsonov): Use a single runtime flag for an init-order checker.
- if (f->strict_init_order) {
- f->check_initialization_order = true;
- }
-}
-
-// Parse flags that may change between startup and activation.
-// On Android they come from a system property.
-// On other platforms this is no-op.
-void ParseExtraActivationFlags() {
- char buf[100];
- GetExtraActivationFlags(buf, sizeof(buf));
- ParseFlagsFromString(flags(), buf);
- if (buf[0] != '\0')
- VReport(1, "Extra activation flags: %s\n", buf);
-}
-
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
-void (*death_callback)(void);
#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@@ -343,7 +86,8 @@
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
-static void ReserveShadowMemoryRange(uptr beg, uptr end) {
+// We need to use an inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
@@ -354,6 +98,10 @@
"Perhaps you're using ulimit -v\n", size);
Abort();
}
+ if (common_flags()->no_huge_pages_for_shadow)
+ NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
}
// --------------- LowLevelAllocateCallback ---------- {{{1
@@ -499,7 +247,13 @@
}
static void ProtectGap(uptr a, uptr size) {
- CHECK_EQ(a, (uptr)Mprotect(a, size));
+ void *res = Mprotect(a, size);
+ if (a == (uptr)res)
+ return;
+ Report("ERROR: Failed to protect the shadow gap. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
}
static void PrintAddressSpaceLayout() {
@@ -538,7 +292,7 @@
Printf("\n");
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
- Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
+ Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -560,8 +314,10 @@
// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
- const char *options = GetEnv("ASAN_OPTIONS");
- InitializeFlags(flags(), options);
+ InitializeFlags();
+
+ SetCanPoisonMemory(flags()->poison_heap);
+ SetMallocContextSize(common_flags()->malloc_context_size);
InitializeHighMemEnd();
@@ -573,20 +329,11 @@
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
- if (!flags()->start_deactivated)
- ParseExtraActivationFlags();
-
__sanitizer_set_report_path(common_flags()->log_path);
+
+ // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
- CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
-
- if (options) {
- VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
- }
-
- if (flags()->start_deactivated)
- AsanStartDeactivated();
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -617,8 +364,7 @@
}
#endif
- if (common_flags()->verbosity)
- PrintAddressSpaceLayout();
+ if (Verbosity()) PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary();
@@ -648,6 +394,8 @@
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
@@ -655,7 +403,12 @@
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
- InitializeAllocator();
+ AllocatorOptions allocator_options;
+ allocator_options.SetFrom(flags(), common_flags());
+ InitializeAllocator(allocator_options);
+
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
@@ -665,32 +418,36 @@
if (flags()->atexit)
Atexit(asan_atexit);
- if (common_flags()->coverage) {
- __sanitizer_cov_init();
- Atexit(__sanitizer_cov_dump);
- }
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ // Now that ASan runtime is (mostly) initialized, deactivate it if
+ // necessary, so that it can be re-activated when requested.
+ if (flags()->start_deactivated)
+ AsanDeactivate();
// interceptors
InitTlsSize();
// Create main thread.
- AsanThread *main_thread = AsanThread::Create(0, 0);
- CreateThreadContextArgs create_main_args = { main_thread, 0 };
- u32 main_tid = asanThreadRegistry().CreateThread(
- 0, true, 0, &create_main_args);
- CHECK_EQ(0, main_tid);
+ AsanThread *main_thread = AsanThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* stack */ nullptr, /* detached */ true);
+ CHECK_EQ(0, main_thread->tid());
SetCurrentThread(main_thread);
- main_thread->ThreadStart(internal_getpid());
+ main_thread->ThreadStart(internal_getpid(),
+ /* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
- __lsan::InitCommonLsan(false);
+ __lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#endif // CAN_SANITIZE_LEAKS
+ InitializeSuppressions();
+
VReport(1, "AddressSanitizer Init done\n");
}
@@ -709,8 +466,7 @@
AsanInitializer() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
- if (UNLIKELY(!asan_inited))
- __asan_init();
+ AsanInitFromRtl();
}
};
@@ -722,13 +478,6 @@
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
-
int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
@@ -762,7 +511,7 @@
}
void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
- death_callback = callback;
+ SetUserDieCallback(callback);
}
// Initialize as requested from instrumented application code.
diff --git a/lib/asan/asan_stack.cc b/lib/asan/asan_stack.cc
index 8188f3b..cf7a587 100644
--- a/lib/asan/asan_stack.cc
+++ b/lib/asan/asan_stack.cc
@@ -13,6 +13,21 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __asan {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+} // namespace __asan
// ------------------ Interface -------------- {{{1
diff --git a/lib/asan/asan_stack.h b/lib/asan/asan_stack.h
index 8610ee4..122967a 100644
--- a/lib/asan/asan_stack.h
+++ b/lib/asan/asan_stack.h
@@ -21,6 +21,11 @@
namespace __asan {
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
@@ -78,9 +83,10 @@
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \
+#define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \
- GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
+ (sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
@@ -92,9 +98,8 @@
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
-#define GET_STACK_TRACE_MALLOC \
- GET_STACK_TRACE(common_flags()->malloc_context_size, \
- common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
diff --git a/lib/asan/asan_suppressions.cc b/lib/asan/asan_suppressions.cc
new file mode 100644
index 0000000..3f76e20
--- /dev/null
+++ b/lib/asan/asan_suppressions.cc
@@ -0,0 +1,114 @@
+//===-- asan_suppressions.cc ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Issue suppression and suppression-related functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_suppressions.h"
+
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kInterceptorName[] = "interceptor_name";
+static const char kInterceptorViaFunction[] = "interceptor_via_fun";
+static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
+static const char kODRViolation[] = "odr_violation";
+static const char *kSuppressionTypes[] = {
+ kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
+ kODRViolation};
+
+extern "C" {
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__asan_default_suppressions();
+#else
+// No weak hooks, provide an empty implementation.
+const char *__asan_default_suppressions() { return ""; }
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+} // extern "C"
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+ if (&__asan_default_suppressions)
+ suppression_ctx->Parse(__asan_default_suppressions());
+}
+
+bool IsInterceptorSuppressed(const char *interceptor_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "interceptor_name" suppressions.
+ return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
+}
+
+bool HaveStackTraceBasedSuppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
+ suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
+}
+
+bool IsODRViolationSuppressed(const char *global_var_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "odr_violation" suppressions.
+ return suppression_ctx->Match(global_var_name, kODRViolation, &s);
+}
+
+bool IsStackTraceSuppressed(const StackTrace *stack) {
+ if (!HaveStackTraceBasedSuppressions())
+ return false;
+
+ CHECK(suppression_ctx);
+ Symbolizer *symbolizer = Symbolizer::GetOrInit();
+ Suppression *s;
+ for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
+ uptr addr = stack->trace[i];
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
+ const char *module_name;
+ uptr module_offset;
+ // Match "interceptor_via_lib" suppressions.
+ if (symbolizer->GetModuleNameAndOffsetForPC(addr, &module_name,
+ &module_offset) &&
+ suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) {
+ return true;
+ }
+ }
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
+ SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ const char *function_name = cur->info.function;
+ if (!function_name) {
+ continue;
+ }
+ // Match "interceptor_via_fun" suppressions.
+ if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
+ &s)) {
+ frames->ClearAll();
+ return true;
+ }
+ }
+ frames->ClearAll();
+ }
+ }
+ return false;
+}
+
+} // namespace __asan
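
The new asan_suppressions.cc recognizes four suppression types (interceptor_name, interceptor_via_fun, interceptor_via_lib, odr_violation) and pulls defaults from the weak __asan_default_suppressions() hook in addition to the file parsed from flags()->suppressions. A hedged example of providing built-in suppressions through that hook; the suppressed names are made up:

  // Compiled into the instrumented program; overrides the weak default above.
  extern "C" const char *__asan_default_suppressions() {
    return "interceptor_via_fun:crashy_legacy_function\n"  // hypothetical function
           "interceptor_via_lib:libthirdparty.so\n"        // hypothetical library
           "odr_violation:kDuplicatedGlobal\n";            // hypothetical global
  }

The same lines can instead live in a text file passed through the suppressions flag (typically via ASAN_OPTIONS), which InitializeSuppressions() reads with ParseFromFile().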
diff --git a/lib/asan/asan_suppressions.h b/lib/asan/asan_suppressions.h
new file mode 100644
index 0000000..5246b4b
--- /dev/null
+++ b/lib/asan/asan_suppressions.h
@@ -0,0 +1,30 @@
+//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_suppressions.cc.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_SUPPRESSIONS_H
+#define ASAN_SUPPRESSIONS_H
+
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+void InitializeSuppressions();
+bool IsInterceptorSuppressed(const char *interceptor_name);
+bool HaveStackTraceBasedSuppressions();
+bool IsStackTraceSuppressed(const StackTrace *stack);
+bool IsODRViolationSuppressed(const char *global_var_name);
+
+} // namespace __asan
+
+#endif // ASAN_SUPPRESSIONS_H
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index ce53bea..9af5706 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -27,6 +27,11 @@
// AsanThreadContext implementation.
+struct CreateThreadContextArgs {
+ AsanThread *thread;
+ StackTrace *stack;
+};
+
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
@@ -75,13 +80,17 @@
// AsanThread implementation.
-AsanThread *AsanThread::Create(thread_callback_t start_routine,
- void *arg) {
+AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack,
+ bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
+ CreateThreadContextArgs args = { thread, stack };
+ asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
+ parent_tid, &args);
return thread;
}
@@ -155,9 +164,13 @@
AsanPlatformThreadInit();
}
-thread_return_t AsanThread::ThreadStart(uptr os_id) {
+thread_return_t AsanThread::ThreadStart(
+ uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, 0);
+ if (signal_thread_is_registered)
+ atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
if (!start_routine_) {
diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h
index bf23728..9da136c 100644
--- a/lib/asan/asan_thread.h
+++ b/lib/asan/asan_thread.h
@@ -55,12 +55,14 @@
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
- static AsanThread *Create(thread_callback_t start_routine, void *arg);
+ static AsanThread *Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack, bool detached);
static void TSDDtor(void *tsd);
void Destroy();
void Init(); // Should be called from the thread itself.
- thread_return_t ThreadStart(uptr os_id);
+ thread_return_t ThreadStart(uptr os_id,
+ atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top() { return stack_top_; }
uptr stack_bottom() { return stack_bottom_; }
@@ -166,11 +168,6 @@
AsanThread *thread;
};
-struct CreateThreadContextArgs {
- AsanThread *thread;
- StackTrace *stack;
-};
-
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
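
AsanThread::Create() now receives the parent tid, the creation stack, and the detached flag so the thread can be registered immediately, and ThreadStart() takes an atomic that the child flips once its registration is visible, letting the creating thread wait for it. Below is a small, self-contained sketch of that handshake using standard C++ threads and atomics rather than the runtime's own registry and primitives; every name in it is illustrative.

#include <atomic>
#include <thread>

// Mirrors signal_thread_is_registered in ThreadStart() above.
static std::atomic<unsigned> thread_is_registered{0};

static void ChildBody() {
  // ... the child registers itself in some thread registry here ...
  thread_is_registered.store(1, std::memory_order_release);
  // ... then runs the user's start routine ...
}

int main() {
  std::thread child(ChildBody);
  // The creator (e.g. a pthread_create interceptor) spins until the child is
  // registered, so the returned thread id is always valid for lookups.
  while (thread_is_registered.load(std::memory_order_acquire) == 0)
    std::this_thread::yield();
  child.join();
  return 0;
}
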
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
index 5303d1b..5b1d0da 100644
--- a/lib/asan/asan_win.cc
+++ b/lib/asan/asan_win.cc
@@ -27,13 +27,28 @@
#include "sanitizer_common/sanitizer_mutex.h"
extern "C" {
- SANITIZER_INTERFACE_ATTRIBUTE
- int __asan_should_detect_stack_use_after_return() {
- __asan_init();
- return __asan_option_detect_stack_use_after_return;
- }
+SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_should_detect_stack_use_after_return() {
+ __asan_init();
+ return __asan_option_detect_stack_use_after_return;
}
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value, which works nicely for allocator hooks and
+// __asan_default_options().
+void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
+void __sanitizer_default_free_hook(void *ptr) { }
+const char* __asan_default_default_options() { return ""; }
+const char* __asan_default_default_suppressions() { return ""; }
+void __asan_default_on_error() {}
+#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
+#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
+#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
+} // extern "C"
+
namespace __asan {
// ---------------------- TSD ---------------- {{{1
@@ -60,6 +75,10 @@
AsanThread::TSDDtor(tsd);
}
// ---------------------- Various stuff ---------------- {{{1
+void DisableReexec() {
+ // No need to re-exec on Windows.
+}
+
void MaybeReexec() {
// No need to re-exec on Windows.
}
@@ -89,15 +108,26 @@
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+SignalContext SignalContext::Create(void *siginfo, void *context) {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo;
+ CONTEXT *context_record = (CONTEXT*)context;
+
+ uptr pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ uptr bp = (uptr)context_record->Rbp;
+ uptr sp = (uptr)context_record->Rsp;
+#else
+ uptr bp = (uptr)context_record->Ebp;
+ uptr sp = (uptr)context_record->Esp;
+#endif
+ uptr access_addr = exception_record->ExceptionInformation[1];
+
+ return SignalContext(context, access_addr, pc, sp, bp);
+}
+
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
- uptr pc = (uptr)exception_record->ExceptionAddress;
-#ifdef _WIN64
- uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
-#else
- uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
-#endif
if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
@@ -105,8 +135,8 @@
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation"
: "in-page-error";
- uptr access_addr = exception_record->ExceptionInformation[1];
- ReportSIGSEGV(description, pc, sp, bp, context, access_addr);
+ SignalContext sig = SignalContext::Create(exception_record, context);
+ ReportSIGSEGV(description, sig);
}
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
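
The /alternatename comment in the hunk above is MSVC's closest analogue of a weak default definition. A minimal hedged sketch of the pattern follows; it assumes a 32-bit x86 build, where cdecl C symbols gain a leading underscore (hence the triple underscore in the pragma, exactly as in the diff), and the hook name is made up.

// Illustrative only: __example_hook falls back to __example_hook_default
// unless some other object file defines it (MSVC, 32-bit x86 decoration).
#include <stdio.h>

extern "C" {
void __example_hook_default(void) { puts("default hook"); }
void __example_hook(void);
#pragma comment(linker, "/alternatename:___example_hook=___example_hook_default")  // NOLINT
}

int main() {
  __example_hook();  // Resolves to the default unless a user overrides it.
  return 0;
}
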
diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc
index b38a2d1..7b94302 100644
--- a/lib/asan/asan_win_dll_thunk.cc
+++ b/lib/asan/asan_win_dll_thunk.cc
@@ -294,7 +294,41 @@
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)
+// FIXME: we might want to have a sanitizer_win_dll_thunk?
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_cov)
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
+INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
+INTERFACE_FUNCTION(__sanitizer_cov_with_check)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_FUNCTION(__sanitizer_reset_coverage)
+INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
// TODO(timurrrr): Add more interface functions on an as-needed basis.
diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc
index 3a4de7d..1945614 100644
--- a/lib/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -23,10 +23,11 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
-extern "C" {
-__declspec(dllimport) int __asan_set_seh_filter();
-__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+#include <windows.h>
+#include <psapi.h>
+extern "C" {
+////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows.
//
@@ -37,16 +38,82 @@
// with an MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the variable, which is
// constant after initialization anyway.
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
+}
-// Set the ASan-specific SEH handler at the end of CRT initialization of each
-// module (see asan_win.cc for the details).
+////////////////////////////////////////////////////////////////////////////////
+// For some reason, the MD CRT doesn't call the C/C++ terminators the way the
+// MT CRT does.
+// To work around this, for each DLL we schedule a call to
+// UnregisterGlobalsInRange atexit() specifying the address range of the DLL
+// image to unregister globals in that range. We don't do the same
+// for the main module (.exe) as the asan_globals.cc allocator is destroyed
+// by the time UnregisterGlobalsInRange is executed.
+// See PR22545 for the details.
+namespace __asan {
+__declspec(dllimport)
+void UnregisterGlobalsInRange(void *beg, void *end);
+}
+
+namespace {
+void *this_module_base, *this_module_end;
+
+void UnregisterGlobals() {
+ __asan::UnregisterGlobalsInRange(this_module_base, this_module_end);
+}
+
+int ScheduleUnregisterGlobals() {
+ HMODULE this_module = 0;
+ // This increments the reference count of the DLL module, so we need to
+ // call FreeLibrary later.
+ if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+ (LPCTSTR)&UnregisterGlobals, &this_module))
+ return 1;
+
+ // Skip the main module.
+ if (this_module == GetModuleHandle(0))
+ return 0;
+
+ MODULEINFO mi;
+ bool success =
+ GetModuleInformation(GetCurrentProcess(), this_module, &mi, sizeof(mi));
+ if (!FreeLibrary(this_module))
+ return 2;
+ if (!success)
+ return 3;
+
+ this_module_base = mi.lpBaseOfDll;
+ this_module_end = (char*)mi.lpBaseOfDll + mi.SizeOfImage;
+
+ return atexit(UnregisterGlobals);
+}
+} // namespace
+
+///////////////////////////////////////////////////////////////////////////////
+// ASan SEH handling.
+extern "C" __declspec(dllimport) int __asan_set_seh_filter();
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+
+///////////////////////////////////////////////////////////////////////////////
+// We schedule some work at start-up by placing pointers to our callbacks on
+// the list of CRT C initializers.
+//
+// First, declare sections we'll be using:
+#pragma section(".CRT$XID", long, read) // NOLINT
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+
+// We need to call 'atexit(UnregisterGlobals);' after atexit() is initialized
+// (.CRT$XIC) but before the C++ constructors (.CRT$XCA).
+__declspec(allocate(".CRT$XID"))
+static int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
+
+// We need to set the ASan-specific SEH handler at the end of CRT initialization
+// of each module (see also asan_win.cc).
//
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
-static int SetSEHFilter() { return __asan_set_seh_filter(); }
-#pragma section(".CRT$XIZ", long, read) // NOLINT
-__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
-}
+extern "C" __declspec(allocate(".CRT$XIZ"))
+int (*__asan_seh_interceptor)() = SetSEHFilter;
+
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
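
The thunk above schedules start-up work by dropping function pointers into .CRT$XI* sections, which the CRT walks in section-name order during C initialization. A minimal MSVC-only sketch of the same trick, with made-up names:

// The CRT calls RunAtCInit() during C initialization because a pointer to it
// sits in a .CRT$XI* section; a non-zero return is reported as a start-up
// error. Illustrative only.
#include <stdio.h>

static int RunAtCInit() {
  puts("called during CRT C initialization");
  return 0;
}

#pragma section(".CRT$XIZ", long, read)  // NOLINT
__declspec(allocate(".CRT$XIZ"))
static int (*run_at_c_init)() = RunAtCInit;

int main() { return 0; }
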
diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup
index a620f51..104e07b 100755
--- a/lib/asan/scripts/asan_device_setup
+++ b/lib/asan/scripts/asan_device_setup
@@ -18,6 +18,7 @@
extra_options=
device=
lib=
+use_su=0
function usage {
echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]"
@@ -26,13 +27,70 @@
echo " --extra-options: Extra ASAN_OPTIONS."
echo " --device: Install to the given device. Use 'adb devices' to find"
echo " device-id."
+ echo " --use-su: Use 'su -c' prefix for every adb command instead of using"
+ echo " 'adb root' once."
echo
exit 1
}
+function adb_push {
+ if [ $use_su -eq 0 ]; then
+ $ADB push "$1" "$2"
+ else
+ local FILENAME=$(basename $1)
+ $ADB push "$1" "/data/local/tmp/$FILENAME"
+ $ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null
+ $ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\""
+ $ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\""
+ fi
+}
+
+function adb_remount {
+ if [ $use_su -eq 0 ]; then
+ $ADB remount
+ else
+ local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1`
+ if [ "$STORAGE" != "" ]; then
+ echo Remounting $STORAGE at /system
+ $ADB shell su -c "mount -o remount,rw $STORAGE /system"
+ else
+ echo Failed to get storage device name for "/system" mount point
+ fi
+ fi
+}
+
+function adb_shell {
+ if [ $use_su -eq 0 ]; then
+ $ADB shell $@
+ else
+ $ADB shell su -c "$*"
+ fi
+}
+
+function adb_root {
+ if [ $use_su -eq 0 ]; then
+ $ADB root
+ fi
+}
+
+function adb_wait_for_device {
+ $ADB wait-for-device
+}
+
+function adb_pull {
+ if [ $use_su -eq 0 ]; then
+ $ADB pull "$1" "$2"
+ else
+ local FILENAME=$(basename $1)
+ $ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null
+ $ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" &&
+ $ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\""
+ fi
+}
+
function get_device_arch { # OUTVAR
local _outvar=$1
- local _ABI=$($ADB shell getprop ro.product.cpu.abi)
+ local _ABI=$(adb_shell getprop ro.product.cpu.abi)
local _ARCH=
if [[ $_ABI == x86* ]]; then
_ARCH=i686
@@ -74,6 +132,9 @@
fi
device="$1"
;;
+ --use-su)
+ use_su=1
+ ;;
*)
usage
;;
@@ -86,11 +147,25 @@
ADB="$ADB -s $device"
fi
+if [ $use_su -eq 1 ]; then
+ # Test if 'su' is present on the device
+ SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'`
+ if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then
+ echo "ERROR: Cannot use 'su -c':"
+ echo "$ adb shell su -c \"echo foo\""
+ echo $SU_TEST_OUT
+ echo "Check that 'su' binary is correctly installed on the device or omit"
+ echo " --use-su flag"
+ exit 1
+ fi
+fi
+
echo '>> Remounting /system rw'
-$ADB root
-$ADB wait-for-device
-$ADB remount
-$ADB wait-for-device
+adb_wait_for_device
+adb_root
+adb_wait_for_device
+adb_remount
+adb_wait_for_device
get_device_arch ARCH
echo "Target architecture: $ARCH"
@@ -99,22 +174,24 @@
if [[ x$revert == xyes ]]; then
echo '>> Uninstalling ASan'
- if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
+ if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
echo '>> Pre-L device detected.'
- $ADB shell mv /system/bin/app_process.real /system/bin/app_process
- $ADB shell rm /system/bin/asanwrapper
- $ADB shell rm /system/lib/$ASAN_RT
+ adb_shell mv /system/bin/app_process.real /system/bin/app_process
+ adb_shell rm /system/bin/asanwrapper
else
- $ADB shell rm /system/bin/app_process.wrap
- $ADB shell rm /system/bin/asanwrapper
- $ADB shell rm /system/lib/$ASAN_RT
- $ADB shell rm /system/bin/app_process
- $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
+ adb_shell rm /system/bin/app_process.wrap
+ adb_shell rm /system/bin/asanwrapper
+ adb_shell rm /system/bin/app_process
+ adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
fi
echo '>> Restarting shell'
- $ADB shell stop
- $ADB shell start
+ adb_shell stop
+ adb_shell start
+
+ # Remove the library as the last step so that the 'su' binary still has a
+ # chance to be executed without problems.
+ adb_shell rm /system/lib/$ASAN_RT
echo '>> Done'
exit 0
@@ -145,28 +222,28 @@
TMPDIR="$TMPDIRBASE/new"
mkdir "$TMPDIROLD"
-RELEASE=$($ADB shell getprop ro.build.version.release)
+RELEASE=$(adb_shell getprop ro.build.version.release)
PRE_L=0
if echo "$RELEASE" | grep '^4\.' >&/dev/null; then
PRE_L=1
fi
-if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
+if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
- if $ADB pull /system/bin/app_process.real /dev/null >&/dev/null; then
+ if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then
echo '>> Old-style ASan installation detected. Reverting.'
- $ADB shell mv /system/bin/app_process.real /system/bin/app_process
+ adb_shell mv /system/bin/app_process.real /system/bin/app_process
fi
echo '>> Pre-L device detected. Setting up app_process symlink.'
- $ADB shell mv /system/bin/app_process /system/bin/app_process32
- $ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
+ adb_shell mv /system/bin/app_process /system/bin/app_process32
+ adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
fi
echo '>> Copying files from the device'
-$ADB pull /system/bin/app_process.wrap "$TMPDIROLD" || true
-$ADB pull /system/bin/asanwrapper "$TMPDIROLD" || true
-$ADB pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
+adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true
+adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true
+adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
cp -r "$TMPDIROLD" "$TMPDIR"
if [[ -f "$TMPDIR/app_process.wrap" ]]; then
@@ -184,7 +261,7 @@
ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0
# On Android-L not allowing user segv handler breaks some applications.
-if $ADB shell 'echo $LD_PRELOAD' | grep libsigchain.so >&/dev/null; then
+if [[ PRE_L -eq 0 ]]; then
ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1"
fi
@@ -212,52 +289,52 @@
if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
echo '>> Pushing files to the device'
- $ADB push "$TMPDIR/$ASAN_RT" /system/lib/
- $ADB push "$TMPDIR/app_process.wrap" /system/bin/app_process.wrap
- $ADB push "$TMPDIR/asanwrapper" /system/bin/asanwrapper
+ adb_push "$TMPDIR/$ASAN_RT" /system/lib/
+ adb_push "$TMPDIR/app_process.wrap" /system/bin
+ adb_push "$TMPDIR/asanwrapper" /system/bin
- $ADB shell rm /system/bin/app_process
- $ADB shell ln -s /system/bin/app_process.wrap /system/bin/app_process
+ adb_shell rm /system/bin/app_process
+ adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
- $ADB shell chown root.shell \
+ adb_shell chown root.shell \
/system/lib/"$ASAN_RT" \
/system/bin/app_process.wrap \
/system/bin/asanwrapper
- $ADB shell chmod 644 \
+ adb_shell chmod 644 \
/system/lib/"$ASAN_RT"
- $ADB shell chmod 755 \
+ adb_shell chmod 755 \
/system/bin/app_process.wrap \
/system/bin/asanwrapper
# Make SELinux happy by keeping app_process wrapper and the shell
# it runs on in zygote domain.
ENFORCING=0
- if $ADB shell getenforce | grep Enforcing >/dev/null; then
+ if adb_shell getenforce | grep Enforcing >/dev/null; then
# Sometimes shell is not allowed to change file contexts.
# Temporarily switch to permissive.
ENFORCING=1
- $ADB shell setenforce 0
+ adb_shell setenforce 0
fi
- $ADB shell cp /system/bin/sh /system/bin/sh-from-zygote
+ adb_shell cp /system/bin/sh /system/bin/sh-from-zygote
if [[ PRE_L -eq 1 ]]; then
CTX=u:object_r:system_file:s0
else
CTX=u:object_r:zygote_exec:s0
fi
- $ADB shell chcon $CTX \
+ adb_shell chcon $CTX \
/system/bin/sh-from-zygote \
/system/bin/app_process.wrap \
/system/bin/app_process32
if [ $ENFORCING == 1 ]; then
- $ADB shell setenforce 1
+ adb_shell setenforce 1
fi
echo '>> Restarting shell (asynchronous)'
- $ADB shell stop
- $ADB shell start
+ adb_shell stop
+ adb_shell start
echo '>> Please wait until the device restarts'
else
diff --git a/lib/asan/scripts/asan_symbolize.py b/lib/asan/scripts/asan_symbolize.py
index 76de60a..59fceaa 100755
--- a/lib/asan/scripts/asan_symbolize.py
+++ b/lib/asan/scripts/asan_symbolize.py
@@ -11,11 +11,9 @@
import bisect
import getopt
import os
-import pty
import re
import subprocess
import sys
-import termios
symbolizers = {}
DEBUG = False
@@ -66,10 +64,12 @@
class LLVMSymbolizer(Symbolizer):
- def __init__(self, symbolizer_path, addr):
+ def __init__(self, symbolizer_path, default_arch, system, dsym_hints=[]):
super(LLVMSymbolizer, self).__init__()
self.symbolizer_path = symbolizer_path
- self.default_arch = guess_arch(addr)
+ self.default_arch = default_arch
+ self.system = system
+ self.dsym_hints = dsym_hints
self.pipe = self.open_llvm_symbolizer()
def open_llvm_symbolizer(self):
@@ -79,6 +79,9 @@
'--functions=short',
'--inlining=true',
'--default-arch=%s' % self.default_arch]
+ if self.system == 'Darwin':
+ for hint in self.dsym_hints:
+ cmd.append('--dsym-hint=%s' % hint)
if DEBUG:
print ' '.join(cmd)
try:
@@ -94,7 +97,7 @@
return None
result = []
try:
- symbolizer_input = '%s %s' % (binary, offset)
+ symbolizer_input = '"%s" %s' % (binary, offset)
if DEBUG:
print symbolizer_input
print >> self.pipe.stdin, symbolizer_input
@@ -116,14 +119,14 @@
return result
-def LLVMSymbolizerFactory(system, addr):
+def LLVMSymbolizerFactory(system, default_arch, dsym_hints=[]):
symbolizer_path = os.getenv('LLVM_SYMBOLIZER_PATH')
if not symbolizer_path:
symbolizer_path = os.getenv('ASAN_SYMBOLIZER_PATH')
if not symbolizer_path:
# Assume llvm-symbolizer is in PATH.
symbolizer_path = 'llvm-symbolizer'
- return LLVMSymbolizer(symbolizer_path, addr)
+ return LLVMSymbolizer(symbolizer_path, default_arch, system, dsym_hints)
class Addr2LineSymbolizer(Symbolizer):
@@ -166,6 +169,9 @@
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
+ # Local imports so that the script can start on Windows.
+ import pty
+ import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
@@ -335,26 +341,55 @@
class SymbolizationLoop(object):
- def __init__(self, binary_name_filter=None):
- # Used by clients who may want to supply a different binary name.
- # E.g. in Chrome several binaries may share a single .dSYM.
- self.binary_name_filter = binary_name_filter
- self.system = os.uname()[0]
- if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
- raise Exception('Unknown system')
- self.llvm_symbolizer = None
- self.frame_no = 0
+ def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
+ if sys.platform == 'win32':
+ # ASan on Windows uses dbghelp.dll to symbolize in-process, which works
+ # even in sandboxed processes. Nothing needs to be done here.
+ self.process_line = self.process_line_echo
+ else:
+ # Used by clients who may want to supply a different binary name.
+ # E.g. in Chrome several binaries may share a single .dSYM.
+ self.binary_name_filter = binary_name_filter
+ self.dsym_hint_producer = dsym_hint_producer
+ self.system = os.uname()[0]
+ if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
+ raise Exception('Unknown system')
+ self.llvm_symbolizers = {}
+ self.last_llvm_symbolizer = None
+ self.dsym_hints = set([])
+ self.frame_no = 0
+ self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset):
- # Initialize llvm-symbolizer lazily.
- if not self.llvm_symbolizer:
- self.llvm_symbolizer = LLVMSymbolizerFactory(self.system, addr)
+ # On non-Darwin (i.e. on platforms without .dSYM debug info) always use
+ # a single symbolizer binary.
+ # On Darwin, if the dsym hint producer is present:
+ # 1. check whether we've seen this binary already; if so,
+ # use |llvm_symbolizers[binary]|, which has already loaded the debug
+ # info for this binary (might not be the case for
+ # |last_llvm_symbolizer|);
+ # 2. otherwise check if we've seen all the hints for this binary already;
+ # if so, reuse |last_llvm_symbolizer| which has the full set of hints;
+ # 3. otherwise create a new symbolizer and pass all currently known
+ # .dSYM hints to it.
+ if not binary in self.llvm_symbolizers:
+ use_new_symbolizer = True
+ if self.system == 'Darwin' and self.dsym_hint_producer:
+ dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
+ use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
+ self.dsym_hints |= dsym_hints_for_binary
+ if self.last_llvm_symbolizer and not use_new_symbolizer:
+ self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
+ else:
+ self.last_llvm_symbolizer = LLVMSymbolizerFactory(
+ self.system, guess_arch(addr), self.dsym_hints)
+ self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
# Use the chain of symbolizers:
# Breakpad symbolizer -> LLVM symbolizer -> addr2line/atos
# (fall back to next symbolizer if the previous one fails).
if not binary in symbolizers:
symbolizers[binary] = ChainSymbolizer(
- [BreakpadSymbolizerFactory(binary), self.llvm_symbolizer])
+ [BreakpadSymbolizerFactory(binary), self.llvm_symbolizers[binary]])
result = symbolizers[binary].symbolize(addr, binary, offset)
if result is None:
# Initialize system symbolizer only if other symbolizers failed.
@@ -377,14 +412,14 @@
def process_logfile(self):
self.frame_no = 0
- while True:
- line = logfile.readline()
- if not line:
- break
+ for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
- def process_line(self, line):
+ def process_line_echo(self, line):
+ return [line.rstrip()]
+
+ def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
@@ -409,20 +444,23 @@
if __name__ == '__main__':
- parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
- description='ASan symbolization script',
- epilog='''Example of use:
- asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log''')
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description='ASan symbolization script',
+ epilog='Example of use:\n'
+ 'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
+ '-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
- help='pattern to be cut from the result file path ')
+ help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
- help='demangle function names')
+ help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
- help='set path to sysroot for sanitized binaries')
+ help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
- help='set prefix for binutils')
- parser.add_argument('-l','--logfile', default=sys.stdin, type=argparse.FileType('r'),
- help='set log file name to parse, default is stdin')
+ help='set prefix for binutils')
+ parser.add_argument('-l','--logfile', default=sys.stdin,
+ type=argparse.FileType('r'),
+ help='set log file name to parse, default is stdin')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
index 7b36371..513d128 100644
--- a/lib/asan/tests/CMakeLists.txt
+++ b/lib/asan/tests/CMakeLists.txt
@@ -30,7 +30,8 @@
-fno-rtti
-O2
-Wno-format
- -Werror=sign-compare)
+ -Werror=sign-compare
+ -Wno-non-virtual-dtor)
append_list_if(COMPILER_RT_HAS_WVARIADIC_MACROS_FLAG -Wno-variadic-macros ASAN_UNITTEST_COMMON_CFLAGS)
# -gline-tables-only must be enough for ASan, so use it if possible.
@@ -46,6 +47,11 @@
-DASAN_HAS_EXCEPTIONS=1
-DASAN_UAR=0)
+if(APPLE)
+ list(APPEND ASAN_UNITTEST_COMMON_CFLAGS ${DARWIN_osx_CFLAGS})
+ list(APPEND ASAN_UNITTEST_COMMON_LINKFLAGS ${DARWIN_osx_LINKFLAGS})
+endif()
+
set(ASAN_BLACKLIST_FILE "${CMAKE_CURRENT_SOURCE_DIR}/asan_test.ignore")
set(ASAN_UNITTEST_INSTRUMENTED_CFLAGS
${ASAN_UNITTEST_COMMON_CFLAGS}
@@ -117,7 +123,7 @@
# Link ASan unit test for a given architecture from a set
# of objects in with given linker flags.
macro(add_asan_test test_suite test_name arch kind)
- parse_arguments(TEST "OBJECTS;LINKFLAGS" "WITH_TEST_RUNTIME" ${ARGN})
+ parse_arguments(TEST "OBJECTS;LINKFLAGS;SUBDIR" "WITH_TEST_RUNTIME" ${ARGN})
get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
set(TEST_DEPS ${TEST_OBJECTS})
if(NOT COMPILER_RT_STANDALONE_BUILD)
@@ -132,6 +138,7 @@
endif()
endif()
add_compiler_rt_test(${test_suite} ${test_name}
+ SUBDIR ${TEST_SUBDIR}
OBJECTS ${TEST_OBJECTS}
DEPS ${TEST_DEPS}
LINK_FLAGS ${TEST_LINKFLAGS}
@@ -141,6 +148,11 @@
# Main AddressSanitizer unit tests.
add_custom_target(AsanUnitTests)
set_target_properties(AsanUnitTests PROPERTIES FOLDER "ASan unit tests")
+# AddressSanitizer unit tests with dynamic runtime (on platforms where it's
+# not the default).
+add_custom_target(AsanDynamicUnitTests)
+set_target_properties(AsanDynamicUnitTests
+ PROPERTIES FOLDER "ASan unit tests with dynamic runtime")
# ASan benchmarks (not actively used now).
add_custom_target(AsanBenchmarks)
set_target_properties(AsanBenchmarks PROPERTIES FOLDER "Asan benchmarks")
@@ -182,11 +194,15 @@
asan_compile(ASAN_INST_TEST_OBJECTS asan_mac_test_helpers.mm ${arch} ${kind}
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC ${ARGN})
endif()
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test" ${arch} ${kind}
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/default")
+ add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Test"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
- if(COMPILER_RT_BUILD_SHARED_ASAN)
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Dynamic-Test" ${arch} ${kind}
+ if(COMPILER_RT_ASAN_HAS_STATIC_RUNTIME)
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynamic")
+ add_asan_test(AsanDynamicUnitTests "Asan-${arch}${kind}-Dynamic-Test"
+ ${arch} ${kind} SUBDIR "dynamic"
OBJECTS ${ASAN_INST_TEST_OBJECTS}
LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
endif()
@@ -220,7 +236,8 @@
asan_compile(ASAN_NOINST_TEST_OBJECTS ${src} ${arch} ${kind}
${ASAN_UNITTEST_COMMON_CFLAGS} ${ARGN})
endforeach()
- add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test" ${arch} ${kind}
+ add_asan_test(AsanUnitTests "Asan-${arch}${kind}-Noinst-Test"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_NOINST_TEST_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_NOINST_LINKFLAGS}
WITH_TEST_RUNTIME)
@@ -231,14 +248,10 @@
asan_compile(ASAN_BENCHMARKS_OBJECTS ${src} ${arch} ${kind}
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} ${ARGN})
endforeach()
- add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark" ${arch} ${kind}
+ add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Benchmark"
+ ${arch} ${kind} SUBDIR "default"
OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
LINKFLAGS ${ASAN_UNITTEST_INSTRUMENTED_LINKFLAGS})
- if(COMPILER_RT_BUILD_SHARED_ASAN)
- add_asan_test(AsanBenchmarks "Asan-${arch}${kind}-Dynamic-Benchmark" ${arch} ${kind}
- OBJECTS ${ASAN_BENCHMARKS_OBJECTS}
- LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
- endif()
endmacro()
if(COMPILER_RT_CAN_EXECUTE_TESTS AND NOT ANDROID)
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index 50fdf11..a34c852 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -87,7 +87,7 @@
}
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
- // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
+ // ASan allocator does not keep huge chunks in free list, but unmaps them.
// The chunk should be greater than the quarantine size,
// otherwise it will be stuck in quarantine instead of being unmapped.
static const size_t kLargeMallocSize = (1 << 28) + 1; // 256M
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index bb6af45..6a428fb 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -31,18 +31,12 @@
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.
-#if !defined(_WIN32)
-extern "C" {
-// Set specific ASan options for uninstrumented unittest.
-const char* __asan_default_options() {
- return "allow_reexec=0";
-}
-} // extern "C"
-#endif
-
// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
- AsanInitCaller() { __asan_init(); }
+ AsanInitCaller() {
+ __asan::DisableReexec();
+ __asan_init();
+ }
};
static AsanInitCaller asan_init_caller;
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 67bcbac..952b05e 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -603,7 +603,8 @@
}
#if !defined(__ANDROID__) && !defined(__arm__) && \
- !defined(__powerpc64__) && !defined(__powerpc__)
+ !defined(__powerpc64__) && !defined(__powerpc__) && \
+ !defined(__aarch64__)
// Does not work on Power and ARM:
// https://code.google.com/p/address-sanitizer/issues/detail?id=185
TEST(AddressSanitizer, BuiltinLongJmpTest) {
@@ -1284,3 +1285,33 @@
ASSERT_EQ(0, res);
}
#endif
+
+#if SANITIZER_TEST_HAS_PRINTF_L
+static int vsnprintf_l_wrapper(char *s, size_t n,
+ locale_t l, const char *format, ...) {
+ va_list va;
+ va_start(va, format);
+ int res = vsnprintf_l(s, n , l, format, va);
+ va_end(va);
+ return res;
+}
+
+TEST(AddressSanitizer, snprintf_l) {
+ char buff[5];
+ // Check that snprintf_l() works fine with Asan.
+ int res = snprintf_l(buff, 5,
+ _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()");
+ EXPECT_EQ(12, res);
+ // Check that vsnprintf_l() works fine with Asan.
+ res = vsnprintf_l_wrapper(buff, 5,
+ _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()");
+ EXPECT_EQ(13, res);
+
+ EXPECT_DEATH(snprintf_l(buff, 10,
+ _LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"),
+ "AddressSanitizer: stack-buffer-overflow");
+ EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10,
+ _LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"),
+ "AddressSanitizer: stack-buffer-overflow");
+}
+#endif
diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt
index 999faa8..4d102c6 100644
--- a/lib/builtins/CMakeLists.txt
+++ b/lib/builtins/CMakeLists.txt
@@ -79,7 +79,6 @@
floatuntidf.c
floatuntisf.c
floatuntixf.c
- gcc_personality_v0.c
int_util.c
lshrdi3.c
lshrti3.c
@@ -137,6 +136,12 @@
umodsi3.c
umodti3.c)
+if (HAVE_UNWIND_H)
+ set(GENERIC_SOURCES
+ ${GENERIC_SOURCES}
+ gcc_personality_v0.c)
+endif ()
+
set(x86_64_SOURCES
x86_64/floatdidf.c
x86_64/floatdisf.c
diff --git a/lib/builtins/assembly.h b/lib/builtins/assembly.h
index 8688a9b..8bb0ddc 100644
--- a/lib/builtins/assembly.h
+++ b/lib/builtins/assembly.h
@@ -28,6 +28,7 @@
// tell linker it can break up file at label boundaries
#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
#elif defined(__ELF__)
@@ -39,10 +40,11 @@
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
+#define CONST_SECTION .section .rodata
#else // !__APPLE__ && !__ELF__
-#define HIDDEN_DIRECTIVE(name)
+#define HIDDEN(name)
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
@@ -50,6 +52,7 @@
.scl 2 SEPARATOR \
.type 32 SEPARATOR \
.endef
+#define CONST_SECTION .section .rdata,"rd"
#endif
diff --git a/lib/builtins/atomic.c b/lib/builtins/atomic.c
index 02429a6..35c8837 100644
--- a/lib/builtins/atomic.c
+++ b/lib/builtins/atomic.c
@@ -28,20 +28,14 @@
#include <stdint.h>
#include <string.h>
+#include "assembly.h"
+
// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
-#if __APPLE__
-// mach-o has extra leading underscore
-#pragma redefine_extname __atomic_load_c ___atomic_load
-#pragma redefine_extname __atomic_store_c ___atomic_store
-#pragma redefine_extname __atomic_exchange_c ___atomic_exchange
-#pragma redefine_extname __atomic_compare_exchange_c ___atomic_compare_exchange
-#else
-#pragma redefine_extname __atomic_load_c __atomic_load
-#pragma redefine_extname __atomic_store_c __atomic_store
-#pragma redefine_extname __atomic_exchange_c __atomic_exchange
-#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
-#endif
+#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
+#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
+#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
+#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade between
diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c
index d329b80..8dc0fb1 100644
--- a/lib/builtins/clear_cache.c
+++ b/lib/builtins/clear_cache.c
@@ -13,14 +13,19 @@
#if __APPLE__
#include <libkern/OSCacheControl.h>
#endif
+#if defined(__FreeBSD__) && defined(__arm__)
+ #include <sys/types.h>
+ #include <machine/sysarch.h>
+#endif
+
#if defined(__NetBSD__) && defined(__arm__)
#include <machine/sysarch.h>
#endif
-#if defined(__ANDROID__) && defined(__mips__)
+#if defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
- #ifdef __LP64__
+ #if defined(__ANDROID__) && defined(__LP64__)
/*
* clear_mips_cache - Invalidates instruction cache for Mips.
*/
@@ -84,7 +89,7 @@
* so there is nothing to do
*/
#elif defined(__arm__) && !defined(__APPLE__)
- #if defined(__NetBSD__)
+ #if defined(__FreeBSD__) || defined(__NetBSD__)
struct arm_sync_icache_args arg;
arg.addr = (uintptr_t)start;
@@ -92,7 +97,7 @@
sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__ANDROID__)
- const register int start_reg __asm("r0") = (int) (intptr_t) start;
+ register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
@@ -104,10 +109,10 @@
#else
compilerrt_abort();
#endif
-#elif defined(__ANDROID__) && defined(__mips__)
+#elif defined(__mips__)
const uintptr_t start_int = (uintptr_t) start;
const uintptr_t end_int = (uintptr_t) end;
- #ifdef __LP64__
+ #if defined(__ANDROID__) && defined(__LP64__)
// Call synci implementation for short address range.
const uintptr_t address_range_limit = 256;
if ((end_int - start_int) <= address_range_limit) {
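
clear_cache.c backs the __clear_cache()/__builtin___clear_cache() primitive that this hunk extends to FreeBSD/ARM and to non-Android MIPS. A hedged usage sketch: after writing instructions into a buffer (as a JIT would), flush the range so the instruction cache observes the new bytes. The buffer below is never executed; the call only demonstrates the interface.

#include <cstdint>

int main() {
  static uint32_t code[16] = {0};   // pretend freshly written machine code
  char *begin = reinterpret_cast<char *>(code);
  char *end = begin + sizeof(code);
  // No-op on x86; a syscall or sysarch() on ARM/MIPS, per the #ifdefs above.
  __builtin___clear_cache(begin, end);
  return 0;
}
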
diff --git a/lib/builtins/gcc_personality_v0.c b/lib/builtins/gcc_personality_v0.c
index 869f417..4b95cfd 100644
--- a/lib/builtins/gcc_personality_v0.c
+++ b/lib/builtins/gcc_personality_v0.c
@@ -11,47 +11,7 @@
#include "int_lib.h"
-/*
- * _Unwind_* stuff based on C++ ABI public documentation
- * http://refspecs.freestandards.org/abi-eh-1.21.html
- */
-
-typedef enum {
- _URC_NO_REASON = 0,
- _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
- _URC_FATAL_PHASE2_ERROR = 2,
- _URC_FATAL_PHASE1_ERROR = 3,
- _URC_NORMAL_STOP = 4,
- _URC_END_OF_STACK = 5,
- _URC_HANDLER_FOUND = 6,
- _URC_INSTALL_CONTEXT = 7,
- _URC_CONTINUE_UNWIND = 8
-} _Unwind_Reason_Code;
-
-typedef enum {
- _UA_SEARCH_PHASE = 1,
- _UA_CLEANUP_PHASE = 2,
- _UA_HANDLER_FRAME = 4,
- _UA_FORCE_UNWIND = 8,
- _UA_END_OF_STACK = 16
-} _Unwind_Action;
-
-typedef struct _Unwind_Context* _Unwind_Context_t;
-
-struct _Unwind_Exception {
- uint64_t exception_class;
- void (*exception_cleanup)(_Unwind_Reason_Code reason,
- struct _Unwind_Exception* exc);
- uintptr_t private_1;
- uintptr_t private_2;
-};
-
-COMPILER_RT_ABI const uint8_t* _Unwind_GetLanguageSpecificData(_Unwind_Context_t c);
-COMPILER_RT_ABI void _Unwind_SetGR(_Unwind_Context_t c, int i, uintptr_t n);
-COMPILER_RT_ABI void _Unwind_SetIP(_Unwind_Context_t, uintptr_t new_value);
-COMPILER_RT_ABI uintptr_t _Unwind_GetIP(_Unwind_Context_t context);
-COMPILER_RT_ABI uintptr_t _Unwind_GetRegionStart(_Unwind_Context_t context);
-
+#include <unwind.h>
/*
* Pointer encodings documented at:
@@ -185,12 +145,12 @@
COMPILER_RT_ABI _Unwind_Reason_Code
__gcc_personality_sj0(int version, _Unwind_Action actions,
uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
- _Unwind_Context_t context)
+ struct _Unwind_Context *context)
#else
COMPILER_RT_ABI _Unwind_Reason_Code
__gcc_personality_v0(int version, _Unwind_Action actions,
uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
- _Unwind_Context_t context)
+ struct _Unwind_Context *context)
#endif
{
/* Since C does not have catch clauses, there is nothing to do during */
@@ -199,7 +159,7 @@
return _URC_CONTINUE_UNWIND;
/* There is nothing to do if there is no LSDA for this frame. */
- const uint8_t* lsda = _Unwind_GetLanguageSpecificData(context);
+ const uint8_t* lsda = (uint8_t*)_Unwind_GetLanguageSpecificData(context);
if ( lsda == (uint8_t*) 0 )
return _URC_CONTINUE_UNWIND;
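
The personality routine now takes its _Unwind_* declarations from the system <unwind.h> rather than a hand-rolled copy. A small self-contained sketch of that header in action on Itanium-ABI platforms: walk the current stack and read each frame's PC with _Unwind_GetIP(), the same accessor __gcc_personality_v0 uses when it scans the call-site table.

#include <unwind.h>
#include <cstdio>

static _Unwind_Reason_Code PrintFrame(struct _Unwind_Context *ctx, void *) {
  std::printf("pc = %#lx\n", (unsigned long)_Unwind_GetIP(ctx));
  return _URC_NO_REASON;  // keep walking
}

int main() {
  _Unwind_Backtrace(PrintFrame, nullptr);
  return 0;
}
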
diff --git a/lib/builtins/i386/floatdidf.S b/lib/builtins/i386/floatdidf.S
index f4f5d01..dcc32f8 100644
--- a/lib/builtins/i386/floatdidf.S
+++ b/lib/builtins/i386/floatdidf.S
@@ -7,13 +7,7 @@
#ifdef __i386__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop52:
diff --git a/lib/builtins/i386/floatundidf.S b/lib/builtins/i386/floatundidf.S
index 676fed0..8058c2a 100644
--- a/lib/builtins/i386/floatundidf.S
+++ b/lib/builtins/i386/floatundidf.S
@@ -17,13 +17,7 @@
#ifdef __i386__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop52:
diff --git a/lib/builtins/i386/floatundisf.S b/lib/builtins/i386/floatundisf.S
index 5b81620..94c97e2 100644
--- a/lib/builtins/i386/floatundisf.S
+++ b/lib/builtins/i386/floatundisf.S
@@ -18,7 +18,7 @@
#ifdef __i386__
-.const
+CONST_SECTION
.balign 3
.quad 0x43f0000000000000
@@ -52,13 +52,7 @@
#ifdef __i386__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop52:
diff --git a/lib/builtins/i386/floatundixf.S b/lib/builtins/i386/floatundixf.S
index d60ad7d..814b52f 100644
--- a/lib/builtins/i386/floatundixf.S
+++ b/lib/builtins/i386/floatundixf.S
@@ -7,13 +7,7 @@
#ifdef __i386__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop52:
diff --git a/lib/builtins/int_endianness.h b/lib/builtins/int_endianness.h
index 4b35bde..7995ddb 100644
--- a/lib/builtins/int_endianness.h
+++ b/lib/builtins/int_endianness.h
@@ -16,6 +16,20 @@
#ifndef INT_ENDIANNESS_H
#define INT_ENDIANNESS_H
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ defined(__ORDER_LITTLE_ENDIAN__)
+
+/* Clang and GCC provide built-in endianness definitions. */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif /* __BYTE_ORDER__ */
+
+#else /* Compilers other than Clang or GCC. */
+
#if defined(__SVR4) && defined(__sun)
#include <sys/byteorder.h>
@@ -84,18 +98,6 @@
/* .. */
-#if defined(__linux__)
-
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define _YUGA_LITTLE_ENDIAN 0
-#define _YUGA_BIG_ENDIAN 1
-#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-#define _YUGA_LITTLE_ENDIAN 1
-#define _YUGA_BIG_ENDIAN 0
-#endif /* __BYTE_ORDER__ */
-
-#endif /* GNU/Linux */
-
#if defined(_WIN32)
#define _YUGA_LITTLE_ENDIAN 1
@@ -103,6 +105,8 @@
#endif /* Windows */
+#endif /* Clang or GCC. */
+
/* . */
#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
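
The new leading branch keys the detection off the __BYTE_ORDER__ macros that Clang and GCC predefine, instead of per-OS headers. A tiny stand-alone check using the same macros (valid only for those compilers, which is exactly the case the branch targets):

#include <cstdio>

int main() {
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  std::puts("little-endian target");
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  std::puts("big-endian target");
#else
  std::puts("endianness not known at preprocessing time");
#endif
  return 0;
}
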
diff --git a/lib/builtins/x86_64/floatundidf.S b/lib/builtins/x86_64/floatundidf.S
index d54b974..3cd5d02 100644
--- a/lib/builtins/x86_64/floatundidf.S
+++ b/lib/builtins/x86_64/floatundidf.S
@@ -17,13 +17,7 @@
#ifdef __x86_64__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop52:
diff --git a/lib/builtins/x86_64/floatundisf.S b/lib/builtins/x86_64/floatundisf.S
index e41f118..61952f4 100644
--- a/lib/builtins/x86_64/floatundisf.S
+++ b/lib/builtins/x86_64/floatundisf.S
@@ -7,13 +7,7 @@
#ifdef __x86_64__
-#if defined(__APPLE__)
- .literal4
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
two:
diff --git a/lib/builtins/x86_64/floatundixf.S b/lib/builtins/x86_64/floatundixf.S
index 91bdc8a..92961c8 100644
--- a/lib/builtins/x86_64/floatundixf.S
+++ b/lib/builtins/x86_64/floatundixf.S
@@ -7,13 +7,7 @@
#ifdef __x86_64__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .section .rodata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
.balign 16
twop64:
@@ -42,13 +36,8 @@
#ifdef __x86_64__
-#if defined(__APPLE__)
- .const
-#elif defined(__ELF__)
- .rdata
-#else
- .section .rdata,"rd"
-#endif
+CONST_SECTION
+
.balign 4
twop52:
.quad 0x4330000000000000
diff --git a/lib/dfsan/CMakeLists.txt b/lib/dfsan/CMakeLists.txt
index daad07f..24ea876 100644
--- a/lib/dfsan/CMakeLists.txt
+++ b/lib/dfsan/CMakeLists.txt
@@ -6,13 +6,13 @@
dfsan_custom.cc
dfsan_interceptors.cc)
set(DFSAN_COMMON_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+append_no_rtti_flag(DFSAN_COMMON_CFLAGS)
# Prevent clang from generating libc calls.
append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding DFSAN_COMMON_CFLAGS)
# Static runtime library.
add_custom_target(dfsan)
-set(arch "x86_64")
-if(CAN_TARGET_${arch})
+foreach(arch ${DFSAN_SUPPORTED_ARCH})
set(DFSAN_CFLAGS ${DFSAN_COMMON_CFLAGS})
append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE DFSAN_CFLAGS)
add_compiler_rt_runtime(clang_rt.dfsan-${arch} ${arch} STATIC
@@ -30,7 +30,7 @@
add_dependencies(dfsan
clang_rt.dfsan-${arch}
clang_rt.dfsan-${arch}-symbols)
-endif()
+endforeach()
set(dfsan_abilist_filename ${COMPILER_RT_OUTPUT_DIR}/dfsan_abilist.txt)
add_custom_target(dfsan_abilist ALL
diff --git a/lib/dfsan/Makefile.mk b/lib/dfsan/Makefile.mk
deleted file mode 100644
index 4aeaac4..0000000
--- a/lib/dfsan/Makefile.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-#===- lib/dfsan/Makefile.mk --------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := dfsan
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-
-# Define a convenience variable for all the dfsan functions.
-DfsanFunctions := $(Sources:%.cc=%)
diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc
index dcc52b1..de5b2ce 100644
--- a/lib/dfsan/dfsan.cc
+++ b/lib/dfsan/dfsan.cc
@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "dfsan/dfsan.h"
@@ -63,12 +64,37 @@
// account for the double byte representation of shadow labels and move the
// address into the shadow memory range. See the function shadow_for below.
+// On Linux/MIPS64, memory is laid out as follows:
+//
+// +--------------------+ 0x10000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xF000008000 (kAppAddr)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x2200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x2000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
+#if defined(__x86_64__)
static const uptr kShadowAddr = 0x10000;
static const uptr kUnionTableAddr = 0x200000000000;
static const uptr kUnusedAddr = kUnionTableAddr + sizeof(dfsan_union_table_t);
static const uptr kAppAddr = 0x700000008000;
+#elif defined(__mips64)
+static const uptr kShadowAddr = 0x10000;
+static const uptr kUnionTableAddr = 0x2000000000;
+static const uptr kUnusedAddr = kUnionTableAddr + sizeof(dfsan_union_table_t);
+static const uptr kAppAddr = 0xF000008000;
+#else
+# error "DFSan not supported for this platform!"
+#endif
static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
return &(*(dfsan_union_table_t *) kUnionTableAddr)[l1][l2];
@@ -231,7 +257,7 @@
return __dfsan_union_load(shadow_for(addr), size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) {
return &__dfsan_label_info[label];
}
@@ -285,16 +311,24 @@
}
}
-static void InitializeFlags(Flags &f, const char *env) {
- f.warn_unimplemented = true;
- f.warn_nonzero_labels = false;
- f.strict_data_dependencies = true;
- f.dump_labels_at_exit = "";
+void Flags::SetDefaults() {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
- ParseFlag(env, &f.warn_unimplemented, "warn_unimplemented", "");
- ParseFlag(env, &f.warn_nonzero_labels, "warn_nonzero_labels", "");
- ParseFlag(env, &f.strict_data_dependencies, "strict_data_dependencies", "");
- ParseFlag(env, &f.dump_labels_at_exit, "dump_labels_at_exit", "");
+static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
+
+static void InitializeFlags() {
+ FlagParser parser;
+ RegisterDfsanFlags(&parser, &flags());
+ flags().SetDefaults();
+ parser.ParseString(GetEnv("DFSAN_OPTIONS"));
}
static void dfsan_fini() {
@@ -329,8 +363,7 @@
if (!(init_addr >= kUnusedAddr && init_addr < kAppAddr))
Mprotect(kUnusedAddr, kAppAddr - kUnusedAddr);
- InitializeFlags(flags(), GetEnv("DFSAN_OPTIONS"));
-
+ InitializeFlags();
InitializeInterceptors();
// Register the fini callback to run when the program terminates successfully
diff --git a/lib/dfsan/dfsan.h b/lib/dfsan/dfsan.h
index 1b6c150..ceba353 100644
--- a/lib/dfsan/dfsan.h
+++ b/lib/dfsan/dfsan.h
@@ -44,7 +44,11 @@
void InitializeInterceptors();
inline dfsan_label *shadow_for(void *ptr) {
+#if defined(__x86_64__)
return (dfsan_label *) ((((uptr) ptr) & ~0x700000000000) << 1);
+#elif defined(__mips64)
+ return (dfsan_label *) ((((uptr) ptr) & ~0xF000000000) << 1);
+#endif
}
inline const dfsan_label *shadow_for(const void *ptr) {
@@ -52,17 +56,11 @@
}
struct Flags {
- // Whether to warn on unimplemented functions.
- bool warn_unimplemented;
- // Whether to warn on non-zero labels.
- bool warn_nonzero_labels;
- // Whether to propagate labels only when there is an obvious data dependency
- // (e.g., when comparing strings, ignore the fact that the output of the
- // comparison might be data-dependent on the content of the strings). This
- // applies only to the custom functions defined in 'custom.c'.
- bool strict_data_dependencies;
- // The path of the file where to dump the labels when the program terminates.
- const char* dump_labels_at_exit;
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+
+ void SetDefaults();
};
extern Flags flags_data;
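
shadow_for() above is a direct mapping: mask off the high application-address bits and double the result, since every application byte has a two-byte dfsan_label shadow. A self-contained arithmetic check of the two platform variants, with the masks and addresses copied from this patch (purely illustrative):

#include <cstdint>
#include <cstdio>

// 64-bit addresses, as on the two supported platforms.
using uptr = uint64_t;

static uptr shadow_for_x86_64(uptr p) { return (p & ~0x700000000000ULL) << 1; }
static uptr shadow_for_mips64(uptr p) { return (p & ~0xF000000000ULL) << 1; }

int main() {
  // On both platforms the lowest application address (kAppAddr) maps to the
  // start of shadow memory, kShadowAddr == 0x10000.
  std::printf("x86_64: %#llx -> %#llx\n", 0x700000008000ULL,
              (unsigned long long)shadow_for_x86_64(0x700000008000ULL));
  std::printf("mips64: %#llx -> %#llx\n", 0xF000008000ULL,
              (unsigned long long)shadow_for_mips64(0xF000008000ULL));
  return 0;
}
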
diff --git a/lib/dfsan/dfsan_custom.cc b/lib/dfsan/dfsan_custom.cc
index 839a399..318ecd6 100644
--- a/lib/dfsan/dfsan_custom.cc
+++ b/lib/dfsan/dfsan_custom.cc
@@ -314,11 +314,12 @@
SANITIZER_INTERFACE_ATTRIBUTE void *
__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
dfsan_label flag_label, dfsan_label *ret_label) {
- link_map *map = (link_map *)dlopen(filename, flag);
+ void *handle = dlopen(filename, flag);
+ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
if (map)
ForEachMappedRegion(map, unpoison);
*ret_label = 0;
- return (void *)map;
+ return handle;
}
struct pthread_create_info {
diff --git a/lib/dfsan/dfsan_flags.inc b/lib/dfsan/dfsan_flags.inc
new file mode 100644
index 0000000..24fbfcb
--- /dev/null
+++ b/lib/dfsan/dfsan_flags.inc
@@ -0,0 +1,32 @@
+//===-- dfsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DFSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_FLAG
+# error "Define DFSAN_FLAG prior to including this file!"
+#endif
+
+// DFSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+DFSAN_FLAG(bool, warn_unimplemented, true,
+ "Whether to warn on unimplemented functions.")
+DFSAN_FLAG(bool, warn_nonzero_labels, false,
+           "Whether to warn on non-zero labels.")
+DFSAN_FLAG(
+    bool, strict_data_dependencies, true,
+    "Whether to propagate labels only when there is an obvious data dependency "
+    "(e.g., when comparing strings, ignore the fact that the output of the "
+    "comparison might be data-dependent on the content of the strings). This "
+    "applies only to the custom functions defined in 'custom.c'.")
+DFSAN_FLAG(const char *, dump_labels_at_exit, "",
+           "The path of the file to dump the labels to when the program "
+           "terminates.")
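
dfsan_flags.inc is consumed through an X-macro: once to declare the struct fields, once to reset them to defaults, and once to register them with the parser. The self-contained sketch below inlines the flag list in a list-macro instead of re-including a .inc file, so it compiles on its own; the two flags are illustrative.

#include <cstdio>

// Stand-in for the contents of a *_flags.inc file.
#define MY_FLAG_LIST(X)                                                        \
  X(bool, warn_unimplemented, true, "Whether to warn on unimplemented functions.") \
  X(int, max_leaks, 0, "Maximum number of leaks to report.")

struct Flags {
#define MY_FLAG(Type, Name, DefaultValue, Description) Type Name;
  MY_FLAG_LIST(MY_FLAG)
#undef MY_FLAG

  void SetDefaults() {
#define MY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
    MY_FLAG_LIST(MY_FLAG)
#undef MY_FLAG
  }
};

int main() {
  Flags f;
  f.SetDefaults();
  std::printf("warn_unimplemented=%d max_leaks=%d\n", f.warn_unimplemented,
              f.max_leaks);
  return 0;
}
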
diff --git a/lib/lsan/Makefile.mk b/lib/lsan/Makefile.mk
index 2a6b41c..5e70634 100644
--- a/lib/lsan/Makefile.mk
+++ b/lib/lsan/Makefile.mk
@@ -20,9 +20,6 @@
Dependencies += $(wildcard $(Dir)/../interception/*.h)
Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-# Define a convenience variable for all the lsan functions.
-LsanFunctions := $(Sources:%.cc=%)
-
# lsan functions used in another sanitizers.
LsanCommonSources := $(foreach file,$(wildcard $(Dir)/lsan_common*.cc),$(notdir $(file)))
LsanCommonFunctions := $(LsanCommonSources:%.cc=%)
diff --git a/lib/lsan/lsan.cc b/lib/lsan/lsan.cc
index 1598fca..6018f7b 100644
--- a/lib/lsan/lsan.cc
+++ b/lib/lsan/lsan.cc
@@ -15,6 +15,7 @@
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
@@ -34,13 +35,42 @@
using namespace __lsan; // NOLINT
+static void InitializeFlags() {
+ // Set all the default values.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 30;
+ cf.detect_leaks = true;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterLsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ parser.ParseString(GetEnv("LSAN_OPTIONS"));
+
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
- InitCommonLsan(true);
+ InitializeFlags();
+ InitCommonLsan();
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
@@ -52,6 +82,9 @@
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
lsan_inited = true;
lsan_init_is_running = false;
}
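
InitializeFlags() above sets defaults, registers the LSan and common flags with the shared FlagParser, and then parses the LSAN_OPTIONS environment variable. The toy below sketches only that final step in plain C++ so it runs stand-alone: apply "name=value" pairs from a colon/space-separated option string to a defaults-initialized struct. The two flags and the parsing rules are simplified stand-ins, not the runtime's parser.

#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>

struct ToyFlags {
  bool detect_leaks = true;        // defaults mirror the overrides above
  int malloc_context_size = 30;
};

static void ParseOptions(const char *env, ToyFlags *f) {
  if (!env) return;
  std::string s(env);
  for (char &c : s)
    if (c == ':' || c == ',') c = ' ';
  std::istringstream in(s);
  std::string tok;
  while (in >> tok) {
    std::size_t eq = tok.find('=');
    if (eq == std::string::npos) continue;
    std::string name = tok.substr(0, eq), value = tok.substr(eq + 1);
    if (name == "detect_leaks")
      f->detect_leaks = (value != "0");
    else if (name == "malloc_context_size")
      f->malloc_context_size = std::atoi(value.c_str());
  }
}

int main() {
  ToyFlags f;
  ParseOptions(std::getenv("LSAN_OPTIONS"), &f);
  std::printf("detect_leaks=%d malloc_context_size=%d\n", f.detect_leaks,
              f.malloc_context_size);
  return 0;
}
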
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 8be2a2a..67125db 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -25,10 +25,6 @@
namespace __lsan {
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-
struct ChunkMetadata {
bool allocated : 8; // Must be first.
ChunkTag tag : 2;
@@ -36,8 +32,22 @@
u32 stack_trace_id;
};
+#if defined(__mips64)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+ sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+ PrimaryAllocator;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -47,7 +57,7 @@
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.Init();
+ allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}
void AllocatorThreadFinish() {
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 746244c..a6119af 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -16,11 +16,11 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
@@ -36,52 +36,17 @@
Flags lsan_flags;
-static void InitializeFlags(bool standalone) {
- Flags *f = flags();
- // Default values.
- f->report_objects = false;
- f->resolution = 0;
- f->max_leaks = 0;
- f->exitcode = 23;
- f->use_registers = true;
- f->use_globals = true;
- f->use_stacks = true;
- f->use_tls = true;
- f->use_root_regions = true;
- f->use_unaligned = false;
- f->use_poisoned = false;
- f->log_pointers = false;
- f->log_threads = false;
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
- const char *options = GetEnv("LSAN_OPTIONS");
- if (options) {
- ParseFlag(options, &f->use_registers, "use_registers", "");
- ParseFlag(options, &f->use_globals, "use_globals", "");
- ParseFlag(options, &f->use_stacks, "use_stacks", "");
- ParseFlag(options, &f->use_tls, "use_tls", "");
- ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
- ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
- ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
- ParseFlag(options, &f->report_objects, "report_objects", "");
- ParseFlag(options, &f->resolution, "resolution", "");
- CHECK_GE(&f->resolution, 0);
- ParseFlag(options, &f->max_leaks, "max_leaks", "");
- CHECK_GE(&f->max_leaks, 0);
- ParseFlag(options, &f->log_pointers, "log_pointers", "");
- ParseFlag(options, &f->log_threads, "log_threads", "");
- ParseFlag(options, &f->exitcode, "exitcode", "");
- }
-
- // Set defaults for common flags (only in standalone mode) and parse
- // them from LSAN_OPTIONS.
- CommonFlags *cf = common_flags();
- if (standalone) {
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = 30;
- cf->detect_leaks = true;
- }
- ParseCommonFlagsFromString(cf, options);
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
}
#define LOG_POINTERS(...) \
@@ -94,14 +59,23 @@
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0);
-static bool suppressions_inited = false;
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
void InitializeSuppressions() {
- CHECK(!suppressions_inited);
- SuppressionContext::InitIfNecessary();
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
- SuppressionContext::Get()->Parse(__lsan_default_suppressions());
- suppressions_inited = true;
+ suppression_ctx->Parse(__lsan_default_suppressions());
+}
+
+static SuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
}
struct RootRegion {
@@ -117,8 +91,7 @@
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
-void InitCommonLsan(bool standalone) {
- InitializeFlags(standalone);
+void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
@@ -141,9 +114,11 @@
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false;
-#ifdef __x86_64__
+#if defined(__x86_64__)
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
+#elif defined(__mips64)
+ return ((p >> 40) == 0);
#else
return true;
#endif
@@ -367,7 +342,7 @@
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- uptr resolution = flags()->resolution;
+ u32 resolution = flags()->resolution;
u32 stack_trace_id = 0;
if (resolution > 0) {
StackTrace stack = StackDepotGet(m.stack_trace_id());
@@ -383,7 +358,7 @@
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
- SuppressionContext::Get()->GetMatched(&matched);
+ GetSuppressionContext()->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
@@ -424,7 +399,7 @@
param.success = false;
LockThreadRegistry();
LockAllocator();
- StopTheWorld(DoLeakCheckCallback, &param);
+ DoStopTheWorld(DoLeakCheckCallback, &param);
UnlockAllocator();
UnlockThreadRegistry();
@@ -457,30 +432,27 @@
}
static Suppression *GetSuppressionForAddr(uptr addr) {
- Suppression *s;
+ Suppression *s = nullptr;
// Suppress by module name.
const char *module_name;
uptr module_offset;
- if (Symbolizer::GetOrInit()
- ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
- SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
+ SuppressionContext *suppressions = GetSuppressionContext();
+ if (Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(addr, &module_name,
+ &module_offset) &&
+ suppressions->Match(module_name, kSuppressionLeak, &s))
return s;
// Suppress by file or function name.
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
- addr, addr_frames.data(), kMaxAddrFrames);
- for (uptr i = 0; i < addr_frames_num; i++) {
- if (SuppressionContext::Get()->Match(addr_frames[i].function,
- SuppressionLeak, &s) ||
- SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
- &s))
- return s;
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
+ suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
}
- return 0;
+ frames->ClearAll();
+ return s;
}
static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
@@ -593,10 +565,9 @@
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
- InternalScopedBuffer<char> summary(kMaxSummaryLength);
- internal_snprintf(summary.data(), summary.size(),
- "%zu byte(s) leaked in %zu allocation(s).", bytes,
- allocations);
+ InternalScopedString summary(kMaxSummaryLength);
+ summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
ReportErrorSummary(summary.data());
}
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 86ff12d..4f9d24f 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -19,14 +19,20 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
+ && (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
+namespace __sanitizer {
+class FlagParser;
+}
+
namespace __lsan {
// Chunk tags.
@@ -38,44 +44,19 @@
};
struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+ void SetDefaults();
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
-
- // Print addresses of leaked objects after main leak report.
- bool report_objects;
- // Aggregate two objects into one leak if this many stack frames match. If
- // zero, the entire stack trace must match.
- int resolution;
- // The number of leaks reported.
- int max_leaks;
- // If nonzero kill the process with this exit code upon finding leaks.
- int exitcode;
-
- // Flags controlling the root set of reachable memory.
- // Global variables (.data and .bss).
- bool use_globals;
- // Thread stacks.
- bool use_stacks;
- // Thread registers.
- bool use_registers;
- // TLS and thread-specific storage.
- bool use_tls;
- // Regions added via __lsan_register_root_region().
- bool use_root_regions;
-
- // Consider unaligned pointers valid.
- bool use_unaligned;
- // Consider pointers found in poisoned memory to be valid.
- bool use_poisoned;
-
- // Debug logging.
- bool log_pointers;
- bool log_threads;
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak {
u32 id;
@@ -119,6 +100,8 @@
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
@@ -131,7 +114,7 @@
};
// Functions called from the parent tool.
-void InitCommonLsan(bool standalone);
+void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index ba51868..813e0b7 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -85,10 +85,6 @@
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
- // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
- // deadlocking by running this under StopTheWorld. However, the lock is
- // reentrant, so we should be able to fix this by acquiring the lock before
- // suspending threads.
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
@@ -153,5 +149,30 @@
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
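The comment above explains the deadlock-avoidance trick; as a sketch, the call flow during a leak check now looks like this (reconstructed from DoLeakCheck() and DoStopTheWorld() in this patch):

// DoLeakCheck()
//   LockThreadRegistry(); LockAllocator();
//   DoStopTheWorld(DoLeakCheckCallback, &param)
//     dl_iterate_phdr(DoStopTheWorldCallback, &param)  // parent thread takes the libdl lock
//       StopTheWorld(DoLeakCheckCallback, &param)      // tracer may safely reenter dl_iterate_phdr
//   UnlockAllocator(); UnlockThreadRegistry();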
diff --git a/lib/lsan/lsan_flags.inc b/lib/lsan/lsan_flags.inc
new file mode 100644
index 0000000..b19b345
--- /dev/null
+++ b/lib/lsan/lsan_flags.inc
@@ -0,0 +1,45 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+ "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+ int, resolution, 0,
+ "Aggregate two objects into one leak if this many stack frames match. If "
+ "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0, "The number of leaks reported.")
+LSAN_FLAG(int, exitcode, 23,
+ "If nonzero kill the process with this exit code upon finding leaks.")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+ "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+ "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+ "Root set: include regions added via __lsan_register_root_region().")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+ "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging")
+LSAN_FLAG(bool, log_threads, false, "Debug logging")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
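Each LSAN_FLAG entry in this new file is expanded three times by the macros defined earlier in the patch; taking report_objects as an example, the expansions are roughly:

// struct Flags (lsan_common.h):          bool report_objects;
// Flags::SetDefaults() (lsan_common.cc): report_objects = false;
// RegisterLsanFlags() (lsan_common.cc):
//   RegisterFlag(parser, "report_objects",
//                "Print addresses of leaked objects after main leak report.",
//                &f->report_objects);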
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index b01bbf8..ba2519d 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -215,9 +215,9 @@
int tid = 0;
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
- atomic_store(&p->tid, 0, memory_order_release);
SetCurrentThread(tid);
ThreadStart(tid, GetTid());
+ atomic_store(&p->tid, 0, memory_order_release);
return callback(param);
}
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt
index 90d9fac..ccf47fc 100644
--- a/lib/msan/CMakeLists.txt
+++ b/lib/msan/CMakeLists.txt
@@ -10,6 +10,7 @@
msan_new_delete.cc
msan_report.cc
msan_thread.cc
+ msan_poisoning.cc
)
set(MSAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
diff --git a/lib/msan/Makefile.mk b/lib/msan/Makefile.mk
deleted file mode 100644
index 99e3b03..0000000
--- a/lib/msan/Makefile.mk
+++ /dev/null
@@ -1,24 +0,0 @@
-#===- lib/msan/Makefile.mk ---------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := msan
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../interception/*.h)
-Dependencies += $(wildcard $(Dir)/../sanitizer_common/*.h)
-
-# Define a convenience variable for all the msan functions.
-MsanFunctions := $(Sources:%.cc=%)
diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc
index 09622c4..caa7736 100644
--- a/lib/msan/msan.cc
+++ b/lib/msan/msan.cc
@@ -16,16 +16,17 @@
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"
+#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-
// ACHTUNG! No system header includes in this file.
using namespace __sanitizer;
@@ -96,19 +97,81 @@
static uptr StackOriginPC[kNumStackOriginDescrs];
static atomic_uint32_t NumStackOriginDescrs;
-static void ParseFlagsFromString(Flags *f, const char *str) {
- CommonFlags *cf = common_flags();
- ParseCommonFlagsFromString(cf, str);
- ParseFlag(str, &f->poison_heap_with_zeroes, "poison_heap_with_zeroes", "");
- ParseFlag(str, &f->poison_stack_with_zeroes, "poison_stack_with_zeroes", "");
- ParseFlag(str, &f->poison_in_malloc, "poison_in_malloc", "");
- ParseFlag(str, &f->poison_in_free, "poison_in_free", "");
- ParseFlag(str, &f->exit_code, "exit_code", "");
+void Flags::SetDefaults() {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+}
+
+// keep_going is an old name for halt_on_error,
+// and it has inverse meaning.
+class FlagHandlerKeepGoing : public FlagHandlerBase {
+ bool *halt_on_error_;
+
+ public:
+ explicit FlagHandlerKeepGoing(bool *halt_on_error)
+ : halt_on_error_(halt_on_error) {}
+ bool Parse(const char *value) final {
+ bool tmp;
+ FlagHandler<bool> h(&tmp);
+ if (!h.Parse(value)) return false;
+ *halt_on_error_ = !tmp;
+ return true;
+ }
+};
+
+static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+ FlagHandlerKeepGoing *fh_keep_going = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerKeepGoing(&f->halt_on_error);
+ parser->RegisterHandler("keep_going", fh_keep_going,
+ "deprecated, use halt_on_error");
+}
+
+static void InitializeFlags() {
+ Flags *f = flags();
+ FlagParser parser;
+ RegisterMsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 20;
+ cf.handle_ioctl = true;
+ // FIXME: test and enable.
+ cf.check_printf = false;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+
+ f->SetDefaults();
+
+ // Override from user-specified string.
+ if (__msan_default_options)
+ parser.ParseString(__msan_default_options());
+
+ const char *msan_options = GetEnv("MSAN_OPTIONS");
+ parser.ParseString(msan_options);
+ VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
+
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ // Check flag values:
if (f->exit_code < 0 || f->exit_code > 127) {
Printf("Exit code not in [0, 128) range: %d\n", f->exit_code);
Die();
}
- ParseFlag(str, &f->origin_history_size, "origin_history_size", "");
if (f->origin_history_size < 0 ||
f->origin_history_size > Origin::kMaxDepth) {
Printf(
@@ -117,8 +180,6 @@
f->origin_history_size, Origin::kMaxDepth);
Die();
}
- ParseFlag(str, &f->origin_history_per_stack_limit,
- "origin_history_per_stack_limit", "");
// Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
// StackDepotHandle::inc_use_count_unsafe.
if (f->origin_history_per_stack_limit < 0 ||
@@ -129,51 +190,7 @@
f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
Die();
}
-
- ParseFlag(str, &f->report_umrs, "report_umrs", "");
- ParseFlag(str, &f->wrap_signals, "wrap_signals", "");
- ParseFlag(str, &f->print_stats, "print_stats", "");
- ParseFlag(str, &f->atexit, "atexit", "");
- ParseFlag(str, &f->store_context_size, "store_context_size", "");
if (f->store_context_size < 1) f->store_context_size = 1;
-
- // keep_going is an old name for halt_on_error,
- // and it has inverse meaning.
- f->halt_on_error = !f->halt_on_error;
- ParseFlag(str, &f->halt_on_error, "keep_going", "");
- f->halt_on_error = !f->halt_on_error;
- ParseFlag(str, &f->halt_on_error, "halt_on_error", "");
-}
-
-static void InitializeFlags(Flags *f, const char *options) {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = 20;
- cf->handle_ioctl = true;
- // FIXME: test and enable.
- cf->check_printf = false;
- cf->intercept_tls_get_addr = true;
-
- internal_memset(f, 0, sizeof(*f));
- f->poison_heap_with_zeroes = false;
- f->poison_stack_with_zeroes = false;
- f->poison_in_malloc = true;
- f->poison_in_free = true;
- f->exit_code = 77;
- f->origin_history_size = Origin::kMaxDepth;
- f->origin_history_per_stack_limit = 20000;
- f->report_umrs = true;
- f->wrap_signals = true;
- f->print_stats = false;
- f->atexit = false;
- f->halt_on_error = !&__msan_keep_going;
- f->store_context_size = 20;
-
- // Override from user-specified string.
- if (__msan_default_options)
- ParseFlagsFromString(f, __msan_default_options());
- ParseFlagsFromString(f, options);
}
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
@@ -205,10 +222,10 @@
GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
u32 report_origin =
- (__msan_get_track_origins() && Origin(origin).isValid()) ? origin : 0;
+ (__msan_get_track_origins() && Origin::isValidId(origin)) ? origin : 0;
ReportUMR(&stack, report_origin);
- if (__msan_get_track_origins() && !Origin(origin).isValid()) {
+ if (__msan_get_track_origins() && !Origin::isValidId(origin)) {
Printf(
" ORIGIN: invalid (%x). Might be a bug in MemorySanitizer origin "
"tracking.\n This could still be a bug in your code, too!\n",
@@ -258,32 +275,10 @@
if (t && t->InSignalHandler())
return id;
- Origin o(id);
- int depth = o.depth();
- // 0 means unlimited depth.
- if (flags()->origin_history_size > 0 && depth > 0) {
- if (depth >= flags()->origin_history_size) {
- return id;
- } else {
- ++depth;
- }
- }
-
- StackDepotHandle h = StackDepotPut_WithHandle(*stack);
- if (!h.valid()) return id;
-
- if (flags()->origin_history_per_stack_limit > 0) {
- int use_count = h.use_count();
- if (use_count > flags()->origin_history_per_stack_limit) return id;
- }
-
- u32 chained_id;
- bool inserted = ChainedOriginDepotPut(h.id(), o.id(), &chained_id);
-
- if (inserted && flags()->origin_history_per_stack_limit > 0)
- h.inc_use_count_unsafe();
-
- return Origin(chained_id, depth).raw_id();
+ Origin o = Origin::FromRawId(id);
+ stack->tag = StackTrace::TAG_UNKNOWN;
+ Origin chained = Origin::CreateChainedOrigin(o, stack);
+ return chained.raw_id();
}
} // namespace __msan
@@ -359,9 +354,7 @@
SetDieCallback(MsanDie);
InitTlsSize();
- const char *msan_options = GetEnv("MSAN_OPTIONS");
- InitializeFlags(&msan_flags, msan_options);
- if (common_flags()->help) PrintFlagDescriptions();
+ InitializeFlags();
__sanitizer_set_report_path(common_flags()->log_path);
InitializeInterceptors();
@@ -378,13 +371,10 @@
ReExec();
}
- VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
-
__msan_clear_on_return();
if (__msan_get_track_origins())
VPrintf(1, "msan_track_origins\n");
- if (!InitShadow(/* prot1 */ true, /* prot2 */ true,
- /* map_shadow */ true, __msan_get_track_origins())) {
+ if (!InitShadow(/* map_shadow */ true, __msan_get_track_origins())) {
Printf("FATAL: MemorySanitizer can not mmap the shadow memory.\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Printf("FATAL: Disabling ASLR is known to cause this error.\n");
@@ -396,6 +386,8 @@
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
MsanTSDInit(MsanTSDDtor);
MsanThread *main_thread = MsanThread::Create(0, 0);
@@ -504,24 +496,7 @@
}
void __msan_set_origin(const void *a, uptr size, u32 origin) {
- // Origin mapping is 4 bytes per 4 bytes of application memory.
- // Here we extend the range such that its left and right bounds are both
- // 4 byte aligned.
- if (!__msan_get_track_origins()) return;
- uptr x = MEM_TO_ORIGIN((uptr)a);
- uptr beg = x & ~3UL; // align down.
- uptr end = (x + size + 3) & ~3UL; // align up.
- u64 origin64 = ((u64)origin << 32) | origin;
- // This is like memset, but the value is 32-bit. We unroll by 2 to write
- // 64 bits at once. May want to unroll further to get 128-bit stores.
- if (beg & 7ULL) {
- *(u32*)beg = origin;
- beg += 4;
- }
- for (uptr addr = beg; addr < (end & ~7UL); addr += 8)
- *(u64*)addr = origin64;
- if (end & 7ULL)
- *(u32*)(end - 4) = origin;
+ if (__msan_get_track_origins()) SetOrigin(a, size, origin);
}
// 'descr' is created at compile time and contains '----' in the beginning.
@@ -543,14 +518,14 @@
CHECK_LT(idx, kNumStackOriginDescrs);
StackOriginDescr[idx] = descr + 4;
StackOriginPC[idx] = pc;
- ChainedOriginDepotPut(idx, Origin::kStackRoot, &id);
+ id = Origin::CreateStackOrigin(idx).raw_id();
*id_ptr = id;
if (print)
Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
}
if (print)
Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
- __msan_set_origin(a, size, Origin(id, 1).raw_id());
+ __msan_set_origin(a, size, id);
}
u32 __msan_chain_origin(u32 id) {
@@ -568,6 +543,13 @@
return *(u32*)origin_ptr;
}
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id) {
+ Origin o = Origin::FromRawId(this_id);
+ while (o.raw_id() != prev_id && o.isChainedOrigin())
+ o = o.getNextChainedOrigin(nullptr);
+ return o.raw_id() == prev_id;
+}
+
u32 __msan_get_umr_origin() {
return __msan_origin_tls;
}
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index aed8738..ed18f21 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -25,22 +25,91 @@
# define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
#endif
-#if defined(__mips64)
-#define MEM_TO_SHADOW(mem) (((uptr)mem) & ~0x4000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)shadow) + 0x2000000000ULL)
-#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW(mem)))
-#define MEM_IS_APP(mem) ((uptr)mem >= 0xe000000000ULL)
-#define MEM_IS_SHADOW(mem) \
- ((uptr)mem >= 0xa000000000ULL && (uptr)mem <= 0xc000000000ULL)
-#elif defined(__x86_64__)
-#define MEM_TO_SHADOW(mem) (((uptr)mem) & ~0x400000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)shadow) + 0x200000000000ULL)
-#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW(mem)))
-#define MEM_IS_APP(mem) ((uptr)mem >= 0x600000000000ULL)
-#define MEM_IS_SHADOW(mem) \
- ((uptr)mem >= 0x200000000000ULL && (uptr)mem <= 0x400000000000ULL)
+struct MappingDesc {
+ uptr start;
+ uptr end;
+ enum Type {
+ INVALID, APP, SHADOW, ORIGIN
+ } type;
+ const char *name;
+};
+
+
+#if SANITIZER_LINUX && defined(__mips64)
+
+// Everything is above 0x00e000000000.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
+
+#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
+
+// Low memory: main binary, MAP_32BIT mappings and modules
+// High memory: heap, modules and main thread stack
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
+
+// Maps low and high app ranges to contiguous space with zero base:
+// Low: 0000 0000 0000 - 00ff ffff ffff -> 2000 0000 0000 - 20ff ffff ffff
+// High: 6000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 1fff ffff ffff
+#define LINEARIZE_MEM(mem) \
+ (((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
+#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000)
+
+#elif SANITIZER_LINUX && SANITIZER_WORDSIZE == 64
+
+// Requires PIE binary and ASLR enabled.
+// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
+// Heap at 0x600000000000.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
+#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
+
+#else
+#error "Unsupported platform"
#endif
+const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
+
+#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
+
+#ifndef __clang__
+__attribute__((optimize("unroll-loops")))
+#endif
+inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
+// It is critical for performance that this loop is unrolled (because then it is
+// simplified into just a few constant comparisons).
+#ifdef __clang__
+#pragma unroll
+#endif
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ if (kMemoryLayout[i].type == mapping_type &&
+ addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
+ return true;
+ return false;
+}
+
+#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
+#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
+#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
+
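+// A worked example for the Linux/x86-64 layout above, using an illustrative
+// app address (not taken from a real run):
+//   app address      0x700000000000  (inside the 0x600000000000-0x800000000000 app range)
+//   MEM_TO_SHADOW:   0x700000000000 & ~0x400000000000 == 0x300000000000  (shadow range)
+//   SHADOW_TO_ORIGIN: 0x300000000000 + 0x200000000000 == 0x500000000000  (origin range)
+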
// These constants must be kept in sync with the ones in MemorySanitizer.cc.
const int kMsanParamTlsSize = 800;
const int kMsanRetvalTlsSize = 800;
@@ -51,11 +120,12 @@
extern int msan_report_count;
bool ProtectRange(uptr beg, uptr end);
-bool InitShadow(bool prot1, bool prot2, bool map_shadow, bool init_origins);
+bool InitShadow(bool map_shadow, bool init_origins);
char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorThreadFinish();
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
uptr alignment, bool zeroise);
void MsanDeallocate(StackTrace *stack, void *ptr);
@@ -93,16 +163,12 @@
void UnpoisonParam(uptr n);
void UnpoisonThreadLocalState();
-u32 GetOriginIfPoisoned(uptr a, uptr size);
-void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
-void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack);
-void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack);
-void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack);
-
// Returns a "chained" origin id, pointing to the given stack trace followed by
// the previous origin id.
u32 ChainOrigin(u32 id, StackTrace *stack);
+const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
+
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
if (__msan_get_track_origins() && msan_inited) \
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index aa1ea1d..698b6cd 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -14,12 +14,11 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "msan.h"
#include "msan_allocator.h"
-#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"
+#include "msan_poisoning.h"
namespace __msan {
@@ -75,7 +74,7 @@
if (inited) return;
__msan_init();
inited = true; // this must happen before any threads are created.
- allocator.Init();
+ allocator.Init(common_flags()->allocator_may_return_null);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -94,7 +93,7 @@
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return AllocatorReturnNull();
+ return allocator.ReturnNullOrDie();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -114,11 +113,9 @@
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(*stack);
- CHECK(stack_id);
- u32 id;
- ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
- __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
+ stack->tag = StackTrace::TAG_ALLOC;
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(allocated, size, o.raw_id());
}
}
MSAN_MALLOC_HOOK(allocated, size);
@@ -137,11 +134,9 @@
if (flags()->poison_in_free) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(*stack);
- CHECK(stack_id);
- u32 id;
- ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
- __msan_set_origin(p, size, Origin(id, 1).raw_id());
+ stack->tag = StackTrace::TAG_DEALLOC;
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(p, size, o.raw_id());
}
}
MsanThread *t = GetCurrentThread();
@@ -155,6 +150,13 @@
}
}
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ Init();
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+ return allocator.ReturnNullOrDie();
+ return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
+}
+
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr alignment, bool zeroise) {
if (!old_p)
@@ -169,15 +171,22 @@
if (new_size <= actually_allocated_size) {
// We are not reallocating here.
meta->requested_size = new_size;
- if (new_size > old_size)
- __msan_poison((char*)old_p + old_size, new_size - old_size);
+ if (new_size > old_size) {
+ if (zeroise) {
+ __msan_clear_and_unpoison((char *)old_p + old_size,
+ new_size - old_size);
+ } else if (flags()->poison_in_malloc) {
+ stack->tag = StackTrace::TAG_ALLOC;
+ PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
+ }
+ }
return old_p;
}
uptr memcpy_size = Min(new_size, old_size);
void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
// Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
if (new_p) {
- __msan_memcpy(new_p, old_p, memcpy_size);
+ CopyMemory(new_p, old_p, memcpy_size, stack);
MsanDeallocate(stack, old_p);
}
return new_p;
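The allocator changes above thread common_flags()->allocator_may_return_null through to the combined allocator: MsanCalloc() rejects an overflowing nmemb*size via CallocShouldReturnNullDueToOverflow(), and both that path and the too-large-malloc path go through allocator.ReturnNullOrDie(). A hedged behavioral sketch (the option values are assumptions, and the exact failure mode follows the common sanitizer allocator, not code shown in this hunk):

// MSAN_OPTIONS="allocator_may_return_null=1": an overflowing calloc or an allocation
//   above kMaxAllowedMallocSize returns nullptr to the caller.
// default (allocator_may_return_null=0): the runtime reports the failed allocation and dies.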
diff --git a/lib/msan/msan_chained_origin_depot.cc b/lib/msan/msan_chained_origin_depot.cc
index f3fb3c8..c21e8e8 100644
--- a/lib/msan/msan_chained_origin_depot.cc
+++ b/lib/msan/msan_chained_origin_depot.cc
@@ -94,8 +94,7 @@
typedef Handle handle_type;
};
-// kTabSizeLog = 22 => 32Mb static storage for bucket pointers.
-static StackDepotBase<ChainedOriginDepotNode, 3, 20> chainedOriginDepot;
+static StackDepotBase<ChainedOriginDepotNode, 4, 20> chainedOriginDepot;
StackDepotStats *ChainedOriginDepotGetStats() {
return chainedOriginDepot.GetStats();
diff --git a/lib/msan/msan_flags.h b/lib/msan/msan_flags.h
index 9b93f11..4fc6d17 100644
--- a/lib/msan/msan_flags.h
+++ b/lib/msan/msan_flags.h
@@ -9,28 +9,18 @@
//
// This file is a part of MemorySanitizer.
//
-// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
#ifndef MSAN_FLAGS_H
#define MSAN_FLAGS_H
namespace __msan {
-// Flags.
struct Flags {
- int exit_code;
- int origin_history_size;
- int origin_history_per_stack_limit;
- bool poison_heap_with_zeroes; // default: false
- bool poison_stack_with_zeroes; // default: false
- bool poison_in_malloc; // default: true
- bool poison_in_free; // default: true
- bool report_umrs;
- bool wrap_signals;
- bool print_stats;
- bool halt_on_error;
- bool atexit;
- int store_context_size; // like malloc_context_size, but for uninit stores
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+ void SetDefaults();
};
Flags *flags();
diff --git a/lib/msan/msan_flags.inc b/lib/msan/msan_flags.inc
new file mode 100644
index 0000000..cb58ffc
--- /dev/null
+++ b/lib/msan/msan_flags.inc
@@ -0,0 +1,33 @@
+//===-- msan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_FLAG
+# error "Define MSAN_FLAG prior to including this file!"
+#endif
+
+// MSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+MSAN_FLAG(int, exit_code, 77, "")
+MSAN_FLAG(int, origin_history_size, Origin::kMaxDepth, "")
+MSAN_FLAG(int, origin_history_per_stack_limit, 20000, "")
+MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_in_malloc, true, "")
+MSAN_FLAG(bool, poison_in_free, true, "")
+MSAN_FLAG(bool, report_umrs, true, "")
+MSAN_FLAG(bool, wrap_signals, true, "")
+MSAN_FLAG(bool, print_stats, false, "")
+MSAN_FLAG(bool, halt_on_error, !&__msan_keep_going, "")
+MSAN_FLAG(bool, atexit, false, "")
+MSAN_FLAG(int, store_context_size, 20,
+ "Like malloc_context_size, but for uninit stores.")
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index aa6b1ff..4a24394 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -20,6 +20,7 @@
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"
+#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -42,6 +43,10 @@
using __sanitizer::atomic_store;
using __sanitizer::atomic_uintptr_t;
+#if SANITIZER_FREEBSD
+#define __errno_location __error
+#endif
+
// True if this is a nested interceptor.
static THREADLOCAL int in_interceptor_scope;
@@ -97,6 +102,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb,
void *file) {
ENSURE_MSAN_INITED();
@@ -105,6 +111,10 @@
__msan_unpoison(ptr, res *size);
return res;
}
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED INTERCEPT_FUNCTION(fread_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED
+#endif
INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
ENSURE_MSAN_INITED();
@@ -154,12 +164,17 @@
return 0;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
void *ptr = MsanReallocate(&stack, 0, size, boundary, false);
return ptr;
}
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
@@ -182,6 +197,7 @@
return ptr;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, pvalloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
uptr PageSize = GetPageSizeCached();
@@ -193,6 +209,10 @@
void *ptr = MsanReallocate(&stack, 0, size, PageSize, false);
return ptr;
}
+#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define MSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
INTERCEPTOR(void, free, void *ptr) {
GET_MALLOC_STACK_TRACE;
@@ -200,16 +220,22 @@
MsanDeallocate(&stack, ptr);
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(void, cfree, void *ptr) {
GET_MALLOC_STACK_TRACE;
if (ptr == 0) return;
MsanDeallocate(&stack, ptr);
}
+#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define MSAN_MAYBE_INTERCEPT_CFREE
+#endif
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
return __sanitizer_get_allocated_size(ptr);
}
+#if !SANITIZER_FREEBSD
// This function actually returns a struct by value, but we can't unpoison a
// temporary! The following is equivalent on all supported platforms, and we
// have a test to confirm that.
@@ -217,16 +243,32 @@
REAL(memset)(sret, 0, sizeof(*sret));
__msan_unpoison(sret, sizeof(*sret));
}
+#define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLINFO
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
+#define MSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOPT
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(void, malloc_stats, void) {
// FIXME: implement, but don't call REAL(malloc_stats)!
}
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS
+#endif
INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ if (msan_init_is_running)
+ return REAL(strlen)(s);
ENSURE_MSAN_INITED();
SIZE_T res = REAL(strlen)(s);
CHECK_UNPOISONED(s, res + 1);
@@ -249,7 +291,7 @@
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(strcpy)(dest, src); // NOLINT
- CopyPoison(dest, src, n + 1, &stack);
+ CopyShadowAndOrigin(dest, src, n + 1, &stack);
return res;
}
@@ -260,7 +302,7 @@
if (copy_size < n)
copy_size++; // trailing \0
char *res = REAL(strncpy)(dest, src, n); // NOLINT
- CopyPoison(dest, src, copy_size, &stack);
+ CopyShadowAndOrigin(dest, src, copy_size, &stack);
__msan_unpoison(dest + copy_size, n - copy_size);
return res;
}
@@ -270,47 +312,61 @@
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(stpcpy)(dest, src); // NOLINT
- CopyPoison(dest, src, n + 1, &stack);
+ CopyShadowAndOrigin(dest, src, n + 1, &stack);
return res;
}
INTERCEPTOR(char *, strdup, char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
+ // On FreeBSD strdup() leverages strlen().
+ InterceptorScope interceptor_scope;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(strdup)(src);
- CopyPoison(res, src, n + 1, &stack);
+ CopyShadowAndOrigin(res, src, n + 1, &stack);
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(char *, __strdup, char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(__strdup)(src);
- CopyPoison(res, src, n + 1, &stack);
+ CopyShadowAndOrigin(res, src, n + 1, &stack);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRDUP
+#endif
INTERCEPTOR(char *, strndup, char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
+ // On FreeBSD strndup() leverages strnlen().
+ InterceptorScope interceptor_scope;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(strndup)(src, n);
- CopyPoison(res, src, copy_size, &stack);
+ CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(char *, __strndup, char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(__strndup)(src, n);
- CopyPoison(res, src, copy_size, &stack);
+ CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
+#define MSAN_MAYBE_INTERCEPT___STRNDUP INTERCEPT_FUNCTION(__strndup)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRNDUP
+#endif
INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
ENSURE_MSAN_INITED();
@@ -326,7 +382,7 @@
SIZE_T src_size = REAL(strlen)(src);
SIZE_T dest_size = REAL(strlen)(dest);
char *res = REAL(strcat)(dest, src); // NOLINT
- CopyPoison(dest + dest_size, src, src_size + 1, &stack);
+ CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack);
return res;
}
@@ -336,7 +392,7 @@
SIZE_T dest_size = REAL(strlen)(dest);
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(strncat)(dest, src, n); // NOLINT
- CopyPoison(dest + dest_size, src, copy_size, &stack);
+ CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack);
__msan_unpoison(dest + dest_size + copy_size, 1); // \0
return res;
}
@@ -349,55 +405,63 @@
__msan_unpoison(endptr, sizeof(*endptr)); \
return res;
-#define INTERCEPTOR_STRTO(ret_type, func) \
- INTERCEPTOR(ret_type, func, const char *nptr, char **endptr) { \
- INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \
+#define INTERCEPTOR_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \
}
-#define INTERCEPTOR_STRTO_BASE(ret_type, func) \
- INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
- INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base); \
+#define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+ int base) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base); \
}
-#define INTERCEPTOR_STRTO_LOC(ret_type, func) \
- INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, void *loc) { \
- INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \
- }
-
-#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func) \
- INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base, \
+#define INTERCEPTOR_STRTO_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
void *loc) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \
+ }
+
+#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+ int base, void *loc) { \
INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base, loc); \
}
-INTERCEPTOR_STRTO(double, strtod) // NOLINT
-INTERCEPTOR_STRTO(float, strtof) // NOLINT
-INTERCEPTOR_STRTO(long double, strtold) // NOLINT
-INTERCEPTOR_STRTO_BASE(long, strtol) // NOLINT
-INTERCEPTOR_STRTO_BASE(long long, strtoll) // NOLINT
-INTERCEPTOR_STRTO_BASE(unsigned long, strtoul) // NOLINT
-INTERCEPTOR_STRTO_BASE(unsigned long long, strtoull) // NOLINT
-INTERCEPTOR_STRTO_LOC(double, strtod_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(double, __strtod_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(double, __strtod_internal) // NOLINT
-INTERCEPTOR_STRTO_LOC(float, strtof_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(float, __strtof_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(float, __strtof_internal) // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, strtold_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, __strtold_l) // NOLINT
-INTERCEPTOR_STRTO_LOC(long double, __strtold_internal) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, strtol_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, __strtol_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long, __strtol_internal) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, strtoll_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, __strtoll_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(long long, __strtoll_internal) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, strtoul_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, __strtoul_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long, __strtoul_internal) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, strtoull_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, __strtoull_l) // NOLINT
-INTERCEPTOR_STRTO_BASE_LOC(unsigned long long, __strtoull_internal) // NOLINT
+#define INTERCEPTORS_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type)
+
+#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_internal, char_type)
+
+INTERCEPTORS_STRTO(double, strtod, char) // NOLINT
+INTERCEPTORS_STRTO(float, strtof, char) // NOLINT
+INTERCEPTORS_STRTO(long double, strtold, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(long, strtol, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, strtoll, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, strtoull, char) // NOLINT
+
+INTERCEPTORS_STRTO(double, wcstod, wchar_t) // NOLINT
+INTERCEPTORS_STRTO(float, wcstof, wchar_t) // NOLINT
+INTERCEPTORS_STRTO(long double, wcstold, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t) // NOLINT
+
+#define INTERCEPT_STRTO(func) \
+ INTERCEPT_FUNCTION(func); \
+ INTERCEPT_FUNCTION(func##_l); \
+ INTERCEPT_FUNCTION(__##func##_l); \
+ INTERCEPT_FUNCTION(__##func##_internal);
+
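+// The consolidation above means each INTERCEPTORS_STRTO line fans out into one
+// plain interceptor and three locale/internal variants; expansion sketch for strtod:
+//   INTERCEPTORS_STRTO(double, strtod, char) expands to
+//     INTERCEPTOR_STRTO(double, strtod, char)               // strtod(nptr, endptr)
+//     INTERCEPTOR_STRTO_LOC(double, strtod_l, char)         // strtod_l(nptr, endptr, loc)
+//     INTERCEPTOR_STRTO_LOC(double, __strtod_l, char)
+//     INTERCEPTOR_STRTO_LOC(double, __strtod_internal, char)
+//   and INTERCEPT_STRTO(strtod) later registers the same four symbols.
+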
// FIXME: support *wprintf in common format interceptors.
INTERCEPTOR(int, vswprintf, void *str, uptr size, void *format, va_list ap) {
@@ -451,11 +515,16 @@
INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime_l, s, max, format, tm, loc);
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(SIZE_T, __strftime_l, char *s, SIZE_T max, const char *format,
__sanitizer_tm *tm, void *loc) {
INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, __strftime_l, s, max, format, tm,
loc);
}
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L INTERCEPT_FUNCTION(__strftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L
+#endif
INTERCEPTOR(SIZE_T, wcsftime, wchar_t *s, SIZE_T max, const wchar_t *format,
__sanitizer_tm *tm) {
@@ -468,11 +537,16 @@
loc);
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(SIZE_T, __wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format,
__sanitizer_tm *tm, void *loc) {
INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, __wcsftime_l, s, max, format, tm,
loc);
}
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L INTERCEPT_FUNCTION(__wcsftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L
+#endif
INTERCEPTOR(int, mbtowc, wchar_t *dest, const char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
@@ -507,7 +581,8 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wcscpy)(dest, src);
- CopyPoison(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), &stack);
+ CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1),
+ &stack);
return res;
}
@@ -516,7 +591,7 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmemcpy)(dest, src, n);
- CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
+ CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
@@ -524,14 +599,14 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmempcpy)(dest, src, n);
- CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
+ CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
INTERCEPTOR(wchar_t *, wmemset, wchar_t *s, wchar_t c, SIZE_T n) {
CHECK(MEM_IS_APP(s));
ENSURE_MSAN_INITED();
- wchar_t *res = (wchar_t *)REAL(memset)(s, c, n * sizeof(wchar_t));
+ wchar_t *res = REAL(wmemset)(s, c, n);
__msan_unpoison(s, n * sizeof(wchar_t));
return res;
}
@@ -540,7 +615,7 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmemmove)(dest, src, n);
- MovePoison(dest, src, n * sizeof(wchar_t), &stack);
+ MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
@@ -550,13 +625,6 @@
return res;
}
-INTERCEPTOR(double, wcstod, const wchar_t *nptr, wchar_t **endptr) {
- ENSURE_MSAN_INITED();
- double res = REAL(wcstod)(nptr, endptr);
- __msan_unpoison(endptr, sizeof(*endptr));
- return res;
-}
-
INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
ENSURE_MSAN_INITED();
int res = REAL(gettimeofday)(tv, tz);
@@ -577,6 +645,8 @@
}
INTERCEPTOR(char *, getenv, char *name) {
+ if (msan_init_is_running)
+ return REAL(getenv)(name);
ENSURE_MSAN_INITED();
char *res = REAL(getenv)(name);
if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
@@ -609,6 +679,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__fxstat)(magic, fd, buf);
@@ -616,7 +687,12 @@
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__fxstat64)(magic, fd, buf);
@@ -624,7 +700,20 @@
__msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+#if SANITIZER_FREEBSD
+INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(fstatat)(fd, pathname, buf, flags);
+ if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat)
+#else
INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
int flags) {
ENSURE_MSAN_INITED();
@@ -632,7 +721,10 @@
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat)
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
int flags) {
ENSURE_MSAN_INITED();
@@ -640,7 +732,21 @@
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64
+#endif
+#if SANITIZER_FREEBSD
+INTERCEPTOR(int, stat, char *path, void *buf) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(stat)(path, buf);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(stat)
+#else
INTERCEPTOR(int, __xstat, int magic, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__xstat)(magic, path, buf);
@@ -648,7 +754,10 @@
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
+# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(__xstat)
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __xstat64, int magic, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__xstat64)(magic, path, buf);
@@ -656,7 +765,12 @@
__msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___XSTAT64 INTERCEPT_FUNCTION(__xstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___XSTAT64
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __lxstat, int magic, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__lxstat)(magic, path, buf);
@@ -664,7 +778,12 @@
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___LXSTAT INTERCEPT_FUNCTION(__lxstat)
+#else
+#define MSAN_MAYBE_INTERCEPT___LXSTAT
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, __lxstat64, int magic, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__lxstat64)(magic, path, buf);
@@ -672,6 +791,10 @@
__msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT___LXSTAT64 INTERCEPT_FUNCTION(__lxstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___LXSTAT64
+#endif
INTERCEPTOR(int, pipe, int pipefd[2]) {
if (msan_init_is_running)
@@ -707,6 +830,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) {
ENSURE_MSAN_INITED();
char *res = REAL(fgets_unlocked)(s, size, stream);
@@ -714,6 +838,10 @@
__msan_unpoison(s, REAL(strlen)(s) + 1);
return res;
}
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED
+#endif
INTERCEPTOR(int, getrlimit, int resource, void *rlim) {
if (msan_init_is_running)
@@ -725,6 +853,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, getrlimit64, int resource, void *rlim) {
if (msan_init_is_running)
return REAL(getrlimit64)(resource, rlim);
@@ -734,15 +863,34 @@
__msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz);
return res;
}
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 INTERCEPT_FUNCTION(getrlimit64)
+#else
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64
+#endif
-INTERCEPTOR(int, uname, void *utsname) {
+#if SANITIZER_FREEBSD
+// FreeBSD's <sys/utsname.h> defines uname() as
+// static __inline int uname(struct utsname *name) {
+// return __xuname(SYS_NMLN, (void*)name);
+// }
+INTERCEPTOR(int, __xuname, int size, void *utsname) {
ENSURE_MSAN_INITED();
- int res = REAL(uname)(utsname);
- if (!res) {
+ int res = REAL(__xuname)(size, utsname);
+ if (!res)
__msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
- }
return res;
}
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname)
+#else
+INTERCEPTOR(int, uname, struct utsname *utsname) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(uname)(utsname);
+ if (!res)
+ __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
+ return res;
+}
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname)
+#endif
INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
ENSURE_MSAN_INITED();
@@ -756,6 +904,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents,
int timeout) {
ENSURE_MSAN_INITED();
@@ -765,7 +914,12 @@
}
return res;
}
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT INTERCEPT_FUNCTION(epoll_wait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+#if !SANITIZER_FREEBSD
INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
int timeout, void *sigmask) {
ENSURE_MSAN_INITED();
@@ -775,6 +929,10 @@
}
return res;
}
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT INTERCEPT_FUNCTION(epoll_pwait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT
+#endif
INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
ENSURE_MSAN_INITED();
@@ -794,17 +952,15 @@
__msan_unpoison(buf, res);
if (srcaddr) {
SIZE_T sz = *addrlen;
- __msan_unpoison(srcaddr, (sz < srcaddr_sz) ? sz : srcaddr_sz);
+ __msan_unpoison(srcaddr, Min(sz, srcaddr_sz));
}
}
return res;
}
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
GET_MALLOC_STACK_TRACE;
- if (!msan_inited) {
+ if (UNLIKELY(!msan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const SIZE_T kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -815,7 +971,7 @@
CHECK(allocated < kCallocPoolSize);
return mem;
}
- return MsanReallocate(&stack, 0, nmemb * size, sizeof(u64), true);
+ return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@@ -828,20 +984,18 @@
return MsanReallocate(&stack, 0, size, sizeof(u64), false);
}
-void __msan_allocated_memory(const void* data, uptr size) {
+void __msan_allocated_memory(const void *data, uptr size) {
GET_MALLOC_STACK_TRACE;
- if (flags()->poison_in_malloc)
- __msan_poison(data, size);
- if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(stack);
- u32 id;
- ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
- __msan_set_origin(data, size, Origin(id, 1).raw_id());
+ if (flags()->poison_in_malloc) {
+ stack.tag = STACK_TRACE_TAG_POISON;
+ PoisonMemory(data, size, &stack);
}
}
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
+ if (msan_init_is_running)
+ return REAL(mmap)(addr, length, prot, flags, fd, offset);
ENSURE_MSAN_INITED();
if (addr && !MEM_IS_APP(addr)) {
if (flags & map_fixed) {
@@ -857,6 +1011,7 @@
return res;
}
+#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF64_T offset) {
ENSURE_MSAN_INITED();
@@ -873,6 +1028,10 @@
__msan_unpoison(res, RoundUpTo(length, GetPageSize()));
return res;
}
+#define MSAN_MAYBE_INTERCEPT_MMAP64 INTERCEPT_FUNCTION(mmap64)
+#else
+#define MSAN_MAYBE_INTERCEPT_MMAP64
+#endif
struct dlinfo {
char *dli_fname;
@@ -1199,6 +1358,9 @@
InterceptorScope interceptor_scope; \
__msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */ \
ENSURE_MSAN_INITED();
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
@@ -1216,8 +1378,11 @@
} while (false) // FIXME
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) \
- if (map) ForEachMappedRegion((link_map *)map, __msan_unpoison);
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ do { \
+ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \
+ if (map) ForEachMappedRegion(map, __msan_unpoison); \
+ } while (false)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
@@ -1231,53 +1396,26 @@
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
-static void PoisonShadow(uptr ptr, uptr size, u8 value) {
- uptr PageSize = GetPageSizeCached();
- uptr shadow_beg = MEM_TO_SHADOW(ptr);
- uptr shadow_end = MEM_TO_SHADOW(ptr + size);
- if (value ||
- shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
- REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
- } else {
- uptr page_beg = RoundUpTo(shadow_beg, PageSize);
- uptr page_end = RoundDownTo(shadow_end, PageSize);
-
- if (page_beg >= page_end) {
- REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
- } else {
- if (page_beg != shadow_beg) {
- REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
- }
- if (page_end != shadow_end) {
- REAL(memset)((void *)page_end, 0, shadow_end - page_end);
- }
- MmapFixedNoReserve(page_beg, page_end - page_beg);
- }
- }
-}
-
// These interface functions reside here so that they can use
// REAL(memset), etc.
void __msan_unpoison(const void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
- PoisonShadow((uptr)a, size, 0);
+ SetShadow(a, size, 0);
}
void __msan_poison(const void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
- PoisonShadow((uptr)a, size,
- __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
+ SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
}
void __msan_poison_stack(void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
- PoisonShadow((uptr)a, size,
- __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
+ SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
}
void __msan_clear_and_unpoison(void *a, uptr size) {
REAL(memset)(a, 0, size);
- PoisonShadow((uptr)a, size, 0);
+ SetShadow(a, size, 0);
}
void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
@@ -1286,7 +1424,7 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
void *res = REAL(memcpy)(dest, src, n);
- CopyPoison(dest, src, n, &stack);
+ CopyShadowAndOrigin(dest, src, n, &stack);
return res;
}
@@ -1305,7 +1443,7 @@
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
void *res = REAL(memmove)(dest, src, n);
- MovePoison(dest, src, n, &stack);
+ MoveShadowAndOrigin(dest, src, n, &stack);
return res;
}
@@ -1316,119 +1454,29 @@
namespace __msan {
-u32 GetOriginIfPoisoned(uptr addr, uptr size) {
- unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
- for (uptr i = 0; i < size; ++i)
- if (s[i])
- return *(u32 *)SHADOW_TO_ORIGIN((s + i) & ~3UL);
- return 0;
-}
-
-void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
- u32 src_origin) {
- uptr dst_s = MEM_TO_SHADOW(addr);
- uptr src_s = src_shadow;
- uptr src_s_end = src_s + size;
-
- for (; src_s < src_s_end; ++dst_s, ++src_s)
- if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s &~3UL) = src_origin;
-}
-
-void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack) {
- if (!__msan_get_track_origins()) return;
- if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
-
- uptr d = (uptr)dst;
- uptr beg = d & ~3UL;
- // Copy left unaligned origin if that memory is poisoned.
- if (beg < d) {
- u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
- if (o) {
- if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
- *(u32 *)MEM_TO_ORIGIN(beg) = o;
- }
- beg += 4;
- }
-
- uptr end = (d + size) & ~3UL;
- // If both ends fall into the same 4-byte slot, we are done.
- if (end < beg) return;
-
- // Copy right unaligned origin if that memory is poisoned.
- if (end < d + size) {
- u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
- if (o) {
- if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
- *(u32 *)MEM_TO_ORIGIN(end) = o;
- }
- }
-
- if (beg < end) {
- // Align src up.
- uptr s = ((uptr)src + 3) & ~3UL;
- // FIXME: factor out to msan_copy_origin_aligned
- if (__msan_get_track_origins() > 1) {
- u32 *src = (u32 *)MEM_TO_ORIGIN(s);
- u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
- u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
- u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
- u32 src_o = 0;
- u32 dst_o = 0;
- for (; src < src_end; ++src, ++src_s, ++dst) {
- if (!*src_s) continue;
- if (*src != src_o) {
- src_o = *src;
- dst_o = ChainOrigin(src_o, stack);
- }
- *dst = dst_o;
- }
- } else {
- REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
- end - beg);
- }
- }
-}
-
-void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack) {
- if (!MEM_IS_APP(dst)) return;
- if (!MEM_IS_APP(src)) return;
- if (src == dst) return;
- REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
- (void *)MEM_TO_SHADOW((uptr)src), size);
- CopyOrigin(dst, src, size, stack);
-}
-
-void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack) {
- if (!MEM_IS_APP(dst)) return;
- if (!MEM_IS_APP(src)) return;
- REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
- (void *)MEM_TO_SHADOW((uptr)src), size);
- CopyOrigin(dst, src, size, stack);
-}
-
void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
InitializeCommonInterceptors();
INTERCEPT_FUNCTION(mmap);
- INTERCEPT_FUNCTION(mmap64);
+ MSAN_MAYBE_INTERCEPT_MMAP64;
INTERCEPT_FUNCTION(posix_memalign);
- INTERCEPT_FUNCTION(memalign);
+ MSAN_MAYBE_INTERCEPT_MEMALIGN;
INTERCEPT_FUNCTION(__libc_memalign);
INTERCEPT_FUNCTION(valloc);
- INTERCEPT_FUNCTION(pvalloc);
+ MSAN_MAYBE_INTERCEPT_PVALLOC;
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
INTERCEPT_FUNCTION(free);
- INTERCEPT_FUNCTION(cfree);
+ MSAN_MAYBE_INTERCEPT_CFREE;
INTERCEPT_FUNCTION(malloc_usable_size);
- INTERCEPT_FUNCTION(mallinfo);
- INTERCEPT_FUNCTION(mallopt);
- INTERCEPT_FUNCTION(malloc_stats);
+ MSAN_MAYBE_INTERCEPT_MALLINFO;
+ MSAN_MAYBE_INTERCEPT_MALLOPT;
+ MSAN_MAYBE_INTERCEPT_MALLOC_STATS;
INTERCEPT_FUNCTION(fread);
- INTERCEPT_FUNCTION(fread_unlocked);
+ MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
INTERCEPT_FUNCTION(readlink);
INTERCEPT_FUNCTION(memcpy);
INTERCEPT_FUNCTION(memccpy);
@@ -1443,84 +1491,69 @@
INTERCEPT_FUNCTION(strcpy); // NOLINT
INTERCEPT_FUNCTION(stpcpy); // NOLINT
INTERCEPT_FUNCTION(strdup);
- INTERCEPT_FUNCTION(__strdup);
+ MSAN_MAYBE_INTERCEPT___STRDUP;
INTERCEPT_FUNCTION(strndup);
- INTERCEPT_FUNCTION(__strndup);
+ MSAN_MAYBE_INTERCEPT___STRNDUP;
INTERCEPT_FUNCTION(strncpy); // NOLINT
INTERCEPT_FUNCTION(strlen);
INTERCEPT_FUNCTION(strnlen);
INTERCEPT_FUNCTION(gcvt);
INTERCEPT_FUNCTION(strcat); // NOLINT
INTERCEPT_FUNCTION(strncat); // NOLINT
- INTERCEPT_FUNCTION(strtod);
- INTERCEPT_FUNCTION(strtof);
- INTERCEPT_FUNCTION(strtold);
- INTERCEPT_FUNCTION(strtol);
- INTERCEPT_FUNCTION(strtoll);
- INTERCEPT_FUNCTION(strtoul);
- INTERCEPT_FUNCTION(strtoull);
- INTERCEPT_FUNCTION(strtod_l);
- INTERCEPT_FUNCTION(__strtod_l);
- INTERCEPT_FUNCTION(__strtod_internal);
- INTERCEPT_FUNCTION(strtof_l);
- INTERCEPT_FUNCTION(__strtof_l);
- INTERCEPT_FUNCTION(__strtof_internal);
- INTERCEPT_FUNCTION(strtold_l);
- INTERCEPT_FUNCTION(__strtold_l);
- INTERCEPT_FUNCTION(__strtold_internal);
- INTERCEPT_FUNCTION(strtol_l);
- INTERCEPT_FUNCTION(__strtol_l);
- INTERCEPT_FUNCTION(__strtol_internal);
- INTERCEPT_FUNCTION(strtoll_l);
- INTERCEPT_FUNCTION(__strtoll_l);
- INTERCEPT_FUNCTION(__strtoll_internal);
- INTERCEPT_FUNCTION(strtoul_l);
- INTERCEPT_FUNCTION(__strtoul_l);
- INTERCEPT_FUNCTION(__strtoul_internal);
- INTERCEPT_FUNCTION(strtoull_l);
- INTERCEPT_FUNCTION(__strtoull_l);
- INTERCEPT_FUNCTION(__strtoull_internal);
+ INTERCEPT_STRTO(strtod);
+ INTERCEPT_STRTO(strtof);
+ INTERCEPT_STRTO(strtold);
+ INTERCEPT_STRTO(strtol);
+ INTERCEPT_STRTO(strtoul);
+ INTERCEPT_STRTO(strtoll);
+ INTERCEPT_STRTO(strtoull);
+ INTERCEPT_STRTO(wcstod);
+ INTERCEPT_STRTO(wcstof);
+ INTERCEPT_STRTO(wcstold);
+ INTERCEPT_STRTO(wcstol);
+ INTERCEPT_STRTO(wcstoul);
+ INTERCEPT_STRTO(wcstoll);
+ INTERCEPT_STRTO(wcstoull);
INTERCEPT_FUNCTION(vswprintf);
INTERCEPT_FUNCTION(swprintf);
INTERCEPT_FUNCTION(strxfrm);
INTERCEPT_FUNCTION(strxfrm_l);
INTERCEPT_FUNCTION(strftime);
INTERCEPT_FUNCTION(strftime_l);
- INTERCEPT_FUNCTION(__strftime_l);
+ MSAN_MAYBE_INTERCEPT___STRFTIME_L;
INTERCEPT_FUNCTION(wcsftime);
INTERCEPT_FUNCTION(wcsftime_l);
- INTERCEPT_FUNCTION(__wcsftime_l);
+ MSAN_MAYBE_INTERCEPT___WCSFTIME_L;
INTERCEPT_FUNCTION(mbtowc);
INTERCEPT_FUNCTION(mbrtowc);
INTERCEPT_FUNCTION(wcslen);
INTERCEPT_FUNCTION(wcschr);
INTERCEPT_FUNCTION(wcscpy);
INTERCEPT_FUNCTION(wcscmp);
- INTERCEPT_FUNCTION(wcstod);
INTERCEPT_FUNCTION(getenv);
INTERCEPT_FUNCTION(setenv);
INTERCEPT_FUNCTION(putenv);
INTERCEPT_FUNCTION(gettimeofday);
INTERCEPT_FUNCTION(fcvt);
- INTERCEPT_FUNCTION(__fxstat);
- INTERCEPT_FUNCTION(__fxstatat);
- INTERCEPT_FUNCTION(__xstat);
- INTERCEPT_FUNCTION(__lxstat);
- INTERCEPT_FUNCTION(__fxstat64);
- INTERCEPT_FUNCTION(__fxstatat64);
- INTERCEPT_FUNCTION(__xstat64);
- INTERCEPT_FUNCTION(__lxstat64);
+ MSAN_MAYBE_INTERCEPT___FXSTAT;
+ MSAN_INTERCEPT_FSTATAT;
+ MSAN_INTERCEPT_STAT;
+ MSAN_MAYBE_INTERCEPT___LXSTAT;
+ MSAN_MAYBE_INTERCEPT___FXSTAT64;
+ MSAN_MAYBE_INTERCEPT___FXSTATAT64;
+ MSAN_MAYBE_INTERCEPT___XSTAT64;
+ MSAN_MAYBE_INTERCEPT___LXSTAT64;
INTERCEPT_FUNCTION(pipe);
INTERCEPT_FUNCTION(pipe2);
INTERCEPT_FUNCTION(socketpair);
INTERCEPT_FUNCTION(fgets);
- INTERCEPT_FUNCTION(fgets_unlocked);
+ MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED;
INTERCEPT_FUNCTION(getrlimit);
- INTERCEPT_FUNCTION(getrlimit64);
- INTERCEPT_FUNCTION(uname);
+ MSAN_MAYBE_INTERCEPT_GETRLIMIT64;
+ MSAN_INTERCEPT_UNAME;
INTERCEPT_FUNCTION(gethostname);
- INTERCEPT_FUNCTION(epoll_wait);
- INTERCEPT_FUNCTION(epoll_pwait);
+ MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
+ MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
INTERCEPT_FUNCTION(recv);
INTERCEPT_FUNCTION(recvfrom);
INTERCEPT_FUNCTION(dladdr);
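
The block above also collapses the long run of per-function strto*/wcsto* registrations into a single INTERCEPT_STRTO macro. Its definition sits in an earlier hunk of this patch and is not shown here; a minimal sketch of what it presumably expands to on glibc targets, inferred from the strtod / strtod_l / __strtod_l / __strtod_internal registrations that this hunk deletes (and presumably reduced to the base function on FreeBSD):

// Assumed expansion of INTERCEPT_STRTO (illustrative only; the real macro
// lives earlier in the patch).
#define INTERCEPT_STRTO(func)       \
  INTERCEPT_FUNCTION(func);         \
  INTERCEPT_FUNCTION(func##_l);     \
  INTERCEPT_FUNCTION(__##func##_l); \
  INTERCEPT_FUNCTION(__##func##_internal);
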
diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h
index 8641f81..f4d37d9 100644
--- a/lib/msan/msan_interface_internal.h
+++ b/lib/msan/msan_interface_internal.h
@@ -96,6 +96,13 @@
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_get_origin(const void *a);
+// Test that this_id is a descendant of prev_id (or they are simply equal).
+// "descendant" here means that are part of the same chain, created with
+// __msan_chain_origin.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id);
+
+
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_clear_on_return();
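
Since a value's origin can gain new links every time it is stored, tests can no longer compare origin ids for exact equality; the new query above checks whether one id lies on the chain built on top of another. A minimal usage sketch (the helper name is hypothetical; the EXPECT_ORIGIN macro added to msan_test.cc later in this patch does the same thing):

// Sketch: true if 'observed' equals 'expected' or was chained on top of it.
static bool OriginMatches(const void *p, u32 expected_origin) {
  u32 observed = __msan_get_origin(p);
  return __msan_origin_is_descendant_or_same(observed, expected_origin) != 0;
}
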
diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc
index 2a970c0..6c18516 100644
--- a/lib/msan/msan_linux.cc
+++ b/lib/msan/msan_linux.cc
@@ -9,11 +9,11 @@
//
// This file is a part of MemorySanitizer.
//
-// Linux-specific code.
+// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "msan.h"
#include "msan_thread.h"
@@ -35,64 +35,107 @@
namespace __msan {
-#if defined(__mips64)
-static const uptr kMemBeg = 0xe000000000;
-static const uptr kMemEnd = 0xffffffffff;
-#elif defined(__x86_64__)
-static const uptr kMemBeg = 0x600000000000;
-static const uptr kMemEnd = 0x7fffffffffff;
-#endif
-
-static const uptr kShadowBeg = MEM_TO_SHADOW(kMemBeg);
-static const uptr kShadowEnd = MEM_TO_SHADOW(kMemEnd);
-static const uptr kBad1Beg = 0;
-static const uptr kBad1End = kShadowBeg - 1;
-static const uptr kBad2Beg = kShadowEnd + 1;
-static const uptr kBad2End = kMemBeg - 1;
-static const uptr kOriginsBeg = kBad2Beg;
-static const uptr kOriginsEnd = kBad2End;
-
-bool InitShadow(bool prot1, bool prot2, bool map_shadow, bool init_origins) {
- if ((uptr) & InitShadow < kMemBeg) {
- Printf("FATAL: Code below application range: %p < %p. Non-PIE build?\n",
- &InitShadow, (void *)kMemBeg);
- return false;
+void ReportMapRange(const char *descr, uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ VPrintf(1, "%s : %p - %p\n", descr, beg, end);
}
+}
- VPrintf(1, "__msan_init %p\n", &__msan_init);
- VPrintf(1, "Memory : %p %p\n", kMemBeg, kMemEnd);
- VPrintf(1, "Bad2 : %p %p\n", kBad2Beg, kBad2End);
- VPrintf(1, "Origins : %p %p\n", kOriginsBeg, kOriginsEnd);
- VPrintf(1, "Shadow : %p %p\n", kShadowBeg, kShadowEnd);
- VPrintf(1, "Bad1 : %p %p\n", kBad1Beg, kBad1End);
-
- if (!MemoryRangeIsAvailable(kShadowBeg,
- init_origins ? kOriginsEnd : kShadowEnd) ||
- (prot1 && !MemoryRangeIsAvailable(kBad1Beg, kBad1End)) ||
- (prot2 && !MemoryRangeIsAvailable(kBad2Beg, kBad2End))) {
- Printf("FATAL: Shadow memory range is not available.\n");
- return false;
- }
-
- if (prot1 && !Mprotect(kBad1Beg, kBad1End - kBad1Beg))
- return false;
- if (prot2 && !Mprotect(kBad2Beg, kBad2End - kBad2Beg))
- return false;
- if (map_shadow) {
- void *shadow = MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg);
- if (shadow != (void*)kShadowBeg) return false;
- }
- if (init_origins) {
- void *origins = MmapFixedNoReserve(kOriginsBeg, kOriginsEnd - kOriginsBeg);
- if (origins != (void*)kOriginsBeg) return false;
+static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ if (!MemoryRangeIsAvailable(beg, end)) {
+ Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
+ return false;
+ }
}
return true;
}
+static bool ProtectMemoryRange(uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ if (!Mprotect(beg, size)) {
+ Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void CheckMemoryLayoutSanity() {
+ uptr prev_end = 0;
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+ CHECK_LT(start, end);
+ CHECK_EQ(prev_end, start);
+ CHECK(addr_is_type(start, type));
+ CHECK(addr_is_type((start + end) / 2, type));
+ CHECK(addr_is_type(end - 1, type));
+ if (type == MappingDesc::APP) {
+ uptr addr = start;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = (start + end) / 2;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = end - 1;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+ }
+ prev_end = end;
+ }
+}
+
+bool InitShadow(bool map_shadow, bool init_origins) {
+ // Let user know mapping parameters first.
+ VPrintf(1, "__msan_init %p\n", &__msan_init);
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
+ kMemoryLayout[i].end - 1);
+
+ CheckMemoryLayoutSanity();
+
+ if (!MEM_IS_APP(&__msan_init)) {
+ Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
+ (uptr)&__msan_init);
+ return false;
+ }
+
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+    uptr size = end - start;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+ if ((map_shadow && type == MappingDesc::SHADOW) ||
+ (init_origins && type == MappingDesc::ORIGIN)) {
+ if (!CheckMemoryRangeAvailability(start, size)) return false;
+ if ((uptr)MmapFixedNoReserve(start, size) != start) return false;
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(start, size);
+ } else if (type == MappingDesc::INVALID) {
+ if (!CheckMemoryRangeAvailability(start, size)) return false;
+ if (!ProtectMemoryRange(start, size)) return false;
+ }
+ }
+
+ return true;
+}
+
void MsanDie() {
+ if (common_flags()->coverage)
+ __sanitizer_cov_dump();
if (death_callback)
death_callback();
- _exit(flags()->exit_code);
+ internal__exit(flags()->exit_code);
}
static void MsanAtExit(void) {
@@ -112,20 +155,26 @@
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
+
void MsanTSDInit(void (*destructor)(void *tsd)) {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
-void *MsanTSDGet() {
- CHECK(tsd_key_inited);
- return pthread_getspecific(tsd_key);
+static THREADLOCAL MsanThread* msan_current_thread;
+
+MsanThread *GetCurrentThread() {
+ return msan_current_thread;
}
-void MsanTSDSet(void *tsd) {
+void SetCurrentThread(MsanThread *t) {
+ // Make sure we do not reset the current MsanThread.
+ CHECK_EQ(0, msan_current_thread);
+ msan_current_thread = t;
+ // Make sure that MsanTSDDtor gets called at the end.
CHECK(tsd_key_inited);
- pthread_setspecific(tsd_key, tsd);
+ pthread_setspecific(tsd_key, (void *)t);
}
void MsanTSDDtor(void *tsd) {
@@ -135,9 +184,12 @@
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
+ msan_current_thread = nullptr;
+  // Make sure that the signal handler cannot see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
MsanThread::TSDDtor(tsd);
}
} // namespace __msan
-#endif // __linux__
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
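
InitShadow() now walks a declarative kMemoryLayout table instead of the hard-coded kMemBeg/kShadowBeg constants it replaces. The table itself is defined in msan.h (not part of this hunk); a sketch of the shape implied by its uses above, with purely illustrative addresses rather than the real x86-64 or MIPS mapping:

// Assumed shape of the layout table consumed by InitShadow(); field and
// enumerator names follow their uses in the loop above.
struct MappingDesc {
  uptr start;
  uptr end;
  enum Type { INVALID, APP, SHADOW, ORIGIN } type;
  const char *name;
};

// Hypothetical layout: contiguous from 0, and every APP byte must map into a
// SHADOW and an ORIGIN range, which is what CheckMemoryLayoutSanity() verifies.
const MappingDesc kMemoryLayout[] = {
    {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
    {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW,  "shadow"},
    {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN,  "origin"},
    {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP,     "app"}};
const unsigned kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
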
diff --git a/lib/msan/msan_origin.h b/lib/msan/msan_origin.h
index a415650..36c168b 100644
--- a/lib/msan/msan_origin.h
+++ b/lib/msan/msan_origin.h
@@ -12,6 +12,9 @@
#ifndef MSAN_ORIGIN_H
#define MSAN_ORIGIN_H
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "msan_chained_origin_depot.h"
+
namespace __msan {
// Origin handling.
@@ -20,9 +23,22 @@
// the program and describes, more or less exactly, how this memory came to be
// uninitialized.
//
-// Origin ids are values of ChainedOriginDepot, which is a mapping of (stack_id,
-// prev_id) -> id, where
-// * stack_id describes an event in the program, usually a memory store.
+// There are 3 kinds of origin ids:
+// 0xxx xxxx xxxx xxxx heap origin id
+// 1000 xxxx xxxx xxxx stack origin id
+// 1zzz xxxx xxxx xxxx chained origin id
+//
+// Heap origin id describes a heap memory allocation and contains (in the xxx
+// part) a value of StackDepot.
+//
+// Stack origin id describes a stack memory allocation and contains (in the xxx
+// part) an index into StackOriginDescr and StackOriginPC. We don't store a
+// stack trace for such origins for performance reasons.
+//
+// Chained origin id describes an event of storing an uninitialized value to
+// memory. The xxx part is a value of ChainedOriginDepot, which is a mapping of
+// (stack_id, prev_id) -> id, where
+// * stack_id describes the event.
// StackDepot keeps a mapping between those and corresponding stack traces.
// * prev_id is another origin id that describes the earlier part of the
// uninitialized value history.
@@ -33,43 +49,119 @@
// points in value history marked with origin ids, and edges are events that are
// marked with stack_id.
//
-// There are 2 special root origin ids:
-// * kHeapRoot - an origin with prev_id == kHeapRoot describes an event of
-// allocating memory from heap.
-// * kStackRoot - an origin with prev_id == kStackRoot describes an event of
-// allocating memory from stack (i.e. on function entry).
-// Note that ChainedOriginDepot does not store any node for kHeapRoot or
-// kStackRoot. These are just special id values.
-//
-// Three highest bits of origin id are used to store the length (or depth) of
-// the origin chain. Special depth value of 0 means unlimited.
+// The "zzz" bits of chained origin id are used to store the length (or depth)
+// of the origin chain.
class Origin {
public:
- static const int kDepthBits = 3;
- static const int kDepthShift = 32 - kDepthBits;
- static const u32 kIdMask = ((u32)-1) >> (32 - kDepthShift);
- static const u32 kDepthMask = ~kIdMask;
+ static bool isValidId(u32 id) { return id != 0 && id != (u32)-1; }
- static const int kMaxDepth = (1 << kDepthBits) - 1;
-
- static const u32 kHeapRoot = (u32)-1;
- static const u32 kStackRoot = (u32)-2;
-
- explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
- Origin(u32 id, u32 depth) : raw_id_((depth << kDepthShift) | id) {
- CHECK_EQ(this->depth(), depth);
- CHECK_EQ(this->id(), id);
- }
- int depth() const { return raw_id_ >> kDepthShift; }
- u32 id() const { return raw_id_ & kIdMask; }
u32 raw_id() const { return raw_id_; }
- bool isStackRoot() const { return raw_id_ == kStackRoot; }
- bool isHeapRoot() const { return raw_id_ == kHeapRoot; }
- bool isValid() const { return raw_id_ != 0 && raw_id_ != (u32)-1; }
+ bool isHeapOrigin() const {
+    // 0xxx xxxx xxxx xxxx
+ return raw_id_ >> kHeapShift == 0;
+ }
+ bool isStackOrigin() const {
+ // 1000 xxxx xxxx xxxx
+ return (raw_id_ >> kDepthShift) == (1 << kDepthBits);
+ }
+ bool isChainedOrigin() const {
+ // 1zzz xxxx xxxx xxxx, zzz != 000
+ return (raw_id_ >> kDepthShift) > (1 << kDepthBits);
+ }
+ u32 getChainedId() const {
+ CHECK(isChainedOrigin());
+ return raw_id_ & kChainedIdMask;
+ }
+ u32 getStackId() const {
+ CHECK(isStackOrigin());
+ return raw_id_ & kChainedIdMask;
+ }
+ u32 getHeapId() const {
+ CHECK(isHeapOrigin());
+ return raw_id_ & kHeapIdMask;
+ }
+
+ // Returns the next origin in the chain and the current stack trace.
+ Origin getNextChainedOrigin(StackTrace *stack) const {
+ CHECK(isChainedOrigin());
+ u32 prev_id;
+ u32 stack_id = ChainedOriginDepotGet(getChainedId(), &prev_id);
+ if (stack) *stack = StackDepotGet(stack_id);
+ return Origin(prev_id);
+ }
+
+ StackTrace getStackTraceForHeapOrigin() const {
+ return StackDepotGet(getHeapId());
+ }
+
+ static Origin CreateStackOrigin(u32 id) {
+ CHECK((id & kStackIdMask) == id);
+ return Origin((1 << kHeapShift) | id);
+ }
+
+ static Origin CreateHeapOrigin(StackTrace *stack) {
+ u32 stack_id = StackDepotPut(*stack);
+ CHECK(stack_id);
+ CHECK((stack_id & kHeapIdMask) == stack_id);
+ return Origin(stack_id);
+ }
+
+ static Origin CreateChainedOrigin(Origin prev, StackTrace *stack) {
+ int depth = prev.isChainedOrigin() ? prev.depth() : 0;
+ // depth is the length of the chain minus 1.
+ // origin_history_size of 0 means unlimited depth.
+ if (flags()->origin_history_size > 0) {
+ if (depth + 1 >= flags()->origin_history_size) {
+ return prev;
+ } else {
+ ++depth;
+ CHECK(depth < (1 << kDepthBits));
+ }
+ }
+
+ StackDepotHandle h = StackDepotPut_WithHandle(*stack);
+ if (!h.valid()) return prev;
+
+ if (flags()->origin_history_per_stack_limit > 0) {
+ int use_count = h.use_count();
+ if (use_count > flags()->origin_history_per_stack_limit) return prev;
+ }
+
+ u32 chained_id;
+ bool inserted = ChainedOriginDepotPut(h.id(), prev.raw_id(), &chained_id);
+ CHECK((chained_id & kChainedIdMask) == chained_id);
+
+ if (inserted && flags()->origin_history_per_stack_limit > 0)
+ h.inc_use_count_unsafe();
+
+ return Origin((1 << kHeapShift) | (depth << kDepthShift) | chained_id);
+ }
+
+ static Origin FromRawId(u32 id) {
+ return Origin(id);
+ }
private:
+ static const int kDepthBits = 3;
+ static const int kDepthShift = 32 - kDepthBits - 1;
+
+ static const int kHeapShift = 31;
+ static const u32 kChainedIdMask = ((u32)-1) >> (32 - kDepthShift);
+ static const u32 kStackIdMask = ((u32)-1) >> (32 - kDepthShift);
+ static const u32 kHeapIdMask = ((u32)-1) >> (32 - kHeapShift);
+
u32 raw_id_;
+
+ explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+
+ int depth() const {
+ CHECK(isChainedOrigin());
+ return (raw_id_ >> kDepthShift) & ((1 << kDepthBits) - 1);
+ }
+
+ public:
+ static const int kMaxDepth = (1 << kDepthBits) - 1;
};
} // namespace __msan
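
A compile-time worked example of the new encoding, using the constants defined in the class (kDepthBits = 3, kDepthShift = 28, kHeapShift = 31). Illustrative only, not part of the patch:

// Heap origin: a StackDepot id with bit 31 clear.
// Stack origin: bit 31 set, depth bits (30..28) zero, low 28 bits an index
//   into StackOriginDescr/StackOriginPC.
// Chained origin: bit 31 set, non-zero depth bits, low 28 bits a
//   ChainedOriginDepot id.
constexpr unsigned kDS = 28, kHS = 31;
constexpr unsigned heap_raw    = 0x00001234u;
constexpr unsigned stack_raw   = (1u << kHS) | 0x1234u;               // 0x80001234
constexpr unsigned chained_raw = (1u << kHS) | (2u << kDS) | 0x123u;  // 0xa0000123
static_assert(heap_raw >> kHS == 0, "heap origins keep bit 31 clear");
static_assert(stack_raw >> kDS == (1u << 3), "stack origin: depth field is 000");
static_assert(chained_raw >> kDS > (1u << 3), "chained origin: depth field != 000");
static_assert(((chained_raw >> kDS) & 7u) == 2u, "depth() of this chain is 2");
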
diff --git a/lib/msan/msan_poisoning.cc b/lib/msan/msan_poisoning.cc
new file mode 100644
index 0000000..96411fd
--- /dev/null
+++ b/lib/msan/msan_poisoning.cc
@@ -0,0 +1,174 @@
+//===-- msan_poisoning.cc ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "msan_poisoning.h"
+
+#include "interception/interception.h"
+#include "msan_origin.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memmove, void *dest, const void *src, uptr n)
+
+namespace __msan {
+
+u32 GetOriginIfPoisoned(uptr addr, uptr size) {
+ unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
+ for (uptr i = 0; i < size; ++i)
+ if (s[i]) return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
+ return 0;
+}
+
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
+ u32 src_origin) {
+ uptr dst_s = MEM_TO_SHADOW(addr);
+ uptr src_s = src_shadow;
+ uptr src_s_end = src_s + size;
+
+ for (; src_s < src_s_end; ++dst_s, ++src_s)
+ if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s & ~3UL) = src_origin;
+}
+
+void CopyOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
+
+ uptr d = (uptr)dst;
+ uptr beg = d & ~3UL;
+ // Copy left unaligned origin if that memory is poisoned.
+ if (beg < d) {
+ u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
+ if (o) {
+ if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(beg) = o;
+ }
+ beg += 4;
+ }
+
+ uptr end = (d + size) & ~3UL;
+ // If both ends fall into the same 4-byte slot, we are done.
+ if (end < beg) return;
+
+ // Copy right unaligned origin if that memory is poisoned.
+ if (end < d + size) {
+ u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
+ if (o) {
+ if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(end) = o;
+ }
+ }
+
+ if (beg < end) {
+ // Align src up.
+ uptr s = ((uptr)src + 3) & ~3UL;
+ // FIXME: factor out to msan_copy_origin_aligned
+ if (__msan_get_track_origins() > 1) {
+ u32 *src = (u32 *)MEM_TO_ORIGIN(s);
+ u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
+ u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
+ u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
+ u32 src_o = 0;
+ u32 dst_o = 0;
+ for (; src < src_end; ++src, ++src_s, ++dst) {
+ if (!*src_s) continue;
+ if (*src != src_o) {
+ src_o = *src;
+ dst_o = ChainOrigin(src_o, stack);
+ }
+ *dst = dst_o;
+ }
+ } else {
+ REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
+ end - beg);
+ }
+ }
+}
+
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst)) return;
+ if (!MEM_IS_APP(src)) return;
+ if (src == dst) return;
+ REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
+ (void *)MEM_TO_SHADOW((uptr)src), size);
+ if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst)) return;
+ if (!MEM_IS_APP(src)) return;
+ REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
+ (void *)MEM_TO_SHADOW((uptr)src), size);
+ if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
+ REAL(memcpy)(dst, src, size);
+ CopyShadowAndOrigin(dst, src, size, stack);
+}
+
+void SetShadow(const void *ptr, uptr size, u8 value) {
+ uptr PageSize = GetPageSizeCached();
+ uptr shadow_beg = MEM_TO_SHADOW(ptr);
+ uptr shadow_end = MEM_TO_SHADOW((uptr)ptr + size);
+ if (value ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void *)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+ uptr page_end = RoundDownTo(shadow_end, PageSize);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ MmapFixedNoReserve(page_beg, page_end - page_beg);
+ }
+ }
+}
+
+void SetOrigin(const void *dst, uptr size, u32 origin) {
+ // Origin mapping is 4 bytes per 4 bytes of application memory.
+ // Here we extend the range such that its left and right bounds are both
+ // 4 byte aligned.
+ uptr x = MEM_TO_ORIGIN((uptr)dst);
+ uptr beg = x & ~3UL; // align down.
+ uptr end = (x + size + 3) & ~3UL; // align up.
+ u64 origin64 = ((u64)origin << 32) | origin;
+ // This is like memset, but the value is 32-bit. We unroll by 2 to write
+ // 64 bits at once. May want to unroll further to get 128-bit stores.
+ if (beg & 7ULL) {
+ *(u32 *)beg = origin;
+ beg += 4;
+ }
+ for (uptr addr = beg; addr < (end & ~7UL); addr += 8) *(u64 *)addr = origin64;
+ if (end & 7ULL) *(u32 *)(end - 4) = origin;
+}
+
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
+ SetShadow(dst, size, (u8)-1);
+
+ if (__msan_get_track_origins()) {
+ Origin o = Origin::CreateHeapOrigin(stack);
+ SetOrigin(dst, size, o.raw_id());
+ }
+}
+
+} // namespace __msan
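
A short worked trace of SetOrigin() above, with a made-up origin address, to make the 4-byte granularity and the 64-bit store loop concrete:

// Suppose MEM_TO_ORIGIN(dst) == 0x1006 and size == 5 (addresses illustrative).
//   beg = 0x1006 & ~3           = 0x1004   (align down)
//   end = (0x1006 + 5 + 3) & ~3 = 0x100c   (align up)
// Eight bytes of origin are written for five bytes of application memory:
//   beg & 7 != 0  -> one u32 store of 'origin' at 0x1004; beg becomes 0x1008
//   the u64 loop covers [0x1008, end & ~7) = [0x1008, 0x1008) -> zero iterations
//   end & 7 != 0  -> one u32 store of 'origin' at end - 4 = 0x1008
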
diff --git a/lib/msan/msan_poisoning.h b/lib/msan/msan_poisoning.h
new file mode 100644
index 0000000..edacbee
--- /dev/null
+++ b/lib/msan/msan_poisoning.h
@@ -0,0 +1,59 @@
+//===-- msan_poisoning.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_POISONING_H
+#define MSAN_POISONING_H
+
+#include "msan.h"
+
+namespace __msan {
+
+// Return origin for the first poisoned byte in the memory range, or 0.
+u32 GetOriginIfPoisoned(uptr addr, uptr size);
+
+// Walk [addr, addr+size) app memory region, setting the origin to src_origin
+// at every position where the corresponding shadow byte in
+// [src_shadow, src_shadow+size) is non-zero.
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
+
+// Copy origin from src (app address) to dst (app address), creating chained
+// origin ids as necessary, without overriding origin for fully initialized
+// quads.
+void CopyOrigin(const void *dst, const void *src, uptr size, StackTrace *stack);
+
+// memmove() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack);
+
+// memcpy() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack);
+
+// memcpy() app memory, and do "the right thing" to the corresponding shadow and
+// origin regions.
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack);
+
+// Fill shadow with the given value. Ptr is an application address.
+void SetShadow(const void *ptr, uptr size, u8 value);
+
+// Set origin for the memory region.
+void SetOrigin(const void *dst, uptr size, u32 origin);
+
+// Mark memory region uninitialized, with origins.
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack);
+
+} // namespace __msan
+
+#endif // MSAN_POISONING_H
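
Moving these helpers into msan_poisoning.{h,cc} lets the interceptors, the __msan_* entry points, and the allocator share one implementation. The typical call pattern mirrors the __msan_memcpy change earlier in this patch (the function name below is hypothetical; only the REAL()/CopyShadowAndOrigin() calls are from the patch):

// Sketch: copy application bytes, then keep shadow and origins in sync.
static void *CopyWithShadow(void *dest, const void *src, uptr n,
                            StackTrace *stack) {
  void *res = REAL(memcpy)(dest, src, n);
  CopyShadowAndOrigin(dest, src, n, stack);
  return res;
}
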
diff --git a/lib/msan/msan_report.cc b/lib/msan/msan_report.cc
index f4978c7..33c28b2 100644
--- a/lib/msan/msan_report.cc
+++ b/lib/msan/msan_report.cc
@@ -61,34 +61,38 @@
static void DescribeOrigin(u32 id) {
VPrintf(1, " raw origin id: %d\n", id);
Decorator d;
- while (true) {
- Origin o(id);
- if (!o.isValid()) {
- Printf(" %sinvalid origin id(%d)%s\n", d.Warning(), id, d.End());
- break;
+ Origin o = Origin::FromRawId(id);
+ while (o.isChainedOrigin()) {
+ StackTrace stack;
+ o = o.getNextChainedOrigin(&stack);
+ Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
+ d.End());
+ stack.Print();
+ }
+ if (o.isStackOrigin()) {
+ uptr pc;
+ const char *so = GetStackOriginDescr(o.getStackId(), &pc);
+ DescribeStackOrigin(so, pc);
+ } else {
+ StackTrace stack = o.getStackTraceForHeapOrigin();
+ switch (stack.tag) {
+ case StackTrace::TAG_ALLOC:
+ Printf(" %sUninitialized value was created by a heap allocation%s\n",
+ d.Origin(), d.End());
+ break;
+ case StackTrace::TAG_DEALLOC:
+ Printf(" %sUninitialized value was created by a heap deallocation%s\n",
+ d.Origin(), d.End());
+ break;
+ case STACK_TRACE_TAG_POISON:
+ Printf(" %sMemory was marked as uninitialized%s\n", d.Origin(),
+ d.End());
+ break;
+ default:
+ Printf(" %sUninitialized value was created%s\n", d.Origin(), d.End());
+ break;
}
- u32 prev_id;
- u32 stack_id = ChainedOriginDepotGet(o.id(), &prev_id);
- Origin prev_o(prev_id);
-
- if (prev_o.isStackRoot()) {
- uptr pc;
- const char *so = GetStackOriginDescr(stack_id, &pc);
- DescribeStackOrigin(so, pc);
- break;
- } else if (prev_o.isHeapRoot()) {
- Printf(" %sUninitialized value was created by a heap allocation%s\n",
- d.Origin(), d.End());
- StackDepotGet(stack_id).Print();
- break;
- } else {
- // chained origin
- // FIXME: copied? modified? passed through? observed?
- Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
- d.End());
- StackDepotGet(stack_id).Print();
- id = prev_id;
- }
+ stack.Print();
}
}
@@ -266,7 +270,7 @@
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
d.End());
- if (__sanitizer::common_flags()->verbosity > 0)
+ if (__sanitizer::Verbosity())
DescribeMemoryRange(start, size);
}
diff --git a/lib/msan/msan_thread.cc b/lib/msan/msan_thread.cc
index 2a1e05a..e15a247 100644
--- a/lib/msan/msan_thread.cc
+++ b/lib/msan/msan_thread.cc
@@ -36,6 +36,7 @@
if (tls_begin_ != tls_end_)
__msan_unpoison((void *)tls_begin_, tls_end_ - tls_begin_);
DTLS *dtls = DTLS_Get();
+ CHECK_NE(dtls, 0);
for (uptr i = 0; i < dtls->dtv_size; ++i)
__msan_unpoison((void *)(dtls->dtv[i].beg), dtls->dtv[i].size);
}
@@ -78,15 +79,4 @@
return res;
}
-MsanThread *GetCurrentThread() {
- return reinterpret_cast<MsanThread *>(MsanTSDGet());
-}
-
-void SetCurrentThread(MsanThread *t) {
- // Make sure we do not reset the current MsanThread.
- CHECK_EQ(0, MsanTSDGet());
- MsanTSDSet(t);
- CHECK_EQ(t, MsanTSDGet());
-}
-
} // namespace __msan
diff --git a/lib/msan/tests/CMakeLists.txt b/lib/msan/tests/CMakeLists.txt
index f3c11ba..e008bd3 100644
--- a/lib/msan/tests/CMakeLists.txt
+++ b/lib/msan/tests/CMakeLists.txt
@@ -15,12 +15,11 @@
set(MSAN_LOADABLE_SOURCE msan_loadable.cc)
set(MSAN_UNITTEST_HEADERS
msan_test_config.h
- msandr_test_so.h
../../../include/sanitizer/msan_interface.h
)
-set(MSANDR_UNITTEST_SOURCE msandr_test_so.cc)
set(MSAN_UNITTEST_COMMON_CFLAGS
-I${COMPILER_RT_LIBCXX_PATH}/include
+ ${COMPILER_RT_TEST_CFLAGS}
${COMPILER_RT_GTEST_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/include
-I${COMPILER_RT_SOURCE_DIR}/lib
@@ -113,25 +112,13 @@
msan_compile(MSAN_INST_LOADABLE_OBJECTS ${MSAN_LOADABLE_SOURCE} ${arch} "${kind}"
${MSAN_UNITTEST_INSTRUMENTED_CFLAGS} ${ARGN})
- # Uninstrumented shared object for MSanDR tests.
- set(MSANDR_TEST_OBJECTS)
- msan_compile(MSANDR_TEST_OBJECTS ${MSANDR_UNITTEST_SOURCE} ${arch} "${kind}"
- ${MSAN_UNITTEST_COMMON_CFLAGS})
-
# Instrumented loadable library tests.
set(MSAN_LOADABLE_SO)
msan_link_shared(MSAN_LOADABLE_SO "libmsan_loadable" ${arch} "${kind}"
OBJECTS ${MSAN_INST_LOADABLE_OBJECTS}
DEPS ${MSAN_INST_LOADABLE_OBJECTS})
- # Uninstrumented shared library tests.
- set(MSANDR_TEST_SO)
- msan_link_shared(MSANDR_TEST_SO "libmsandr_test" ${arch} "${kind}"
- OBJECTS ${MSANDR_TEST_OBJECTS}
- DEPS ${MSANDR_TEST_OBJECTS})
-
- set(MSAN_TEST_OBJECTS ${MSAN_INST_TEST_OBJECTS} ${MSAN_INST_GTEST}
- ${MSANDR_TEST_SO})
+ set(MSAN_TEST_OBJECTS ${MSAN_INST_TEST_OBJECTS} ${MSAN_INST_GTEST})
set(MSAN_TEST_DEPS ${MSAN_TEST_OBJECTS} libcxx_msan${kind}
${MSAN_LOADABLE_SO})
if(NOT COMPILER_RT_STANDALONE_BUILD)
@@ -149,9 +136,9 @@
# We should only build MSan unit tests if we can build instrumented libcxx.
if(COMPILER_RT_CAN_EXECUTE_TESTS AND COMPILER_RT_HAS_LIBCXX_SOURCES)
- if(CAN_TARGET_x86_64)
- add_msan_tests_for_arch(x86_64 "")
- add_msan_tests_for_arch(x86_64 "-with-call"
+ foreach(arch ${MSAN_SUPPORTED_ARCH})
+ add_msan_tests_for_arch(${arch} "")
+ add_msan_tests_for_arch(${arch} "-with-call"
-mllvm -msan-instrumentation-with-call-threshold=0)
- endif()
+ endforeach()
endif()
diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc
index 12012a0..00dd20a 100644
--- a/lib/msan/tests/msan_test.cc
+++ b/lib/msan/tests/msan_test.cc
@@ -20,7 +20,19 @@
#include "sanitizer/allocator_interface.h"
#include "sanitizer/msan_interface.h"
-#include "msandr_test_so.h"
+
+#if defined(__FreeBSD__)
+# define _KERNEL // To declare 'shminfo' structure.
+# include <sys/shm.h>
+# undef _KERNEL
+extern "C" {
+// <sys/shm.h> doesn't declare these functions in _KERNEL mode.
+void *shmat(int, const void *, int);
+int shmget(key_t, size_t, int);
+int shmctl(int, int, struct shmid_ds *);
+int shmdt(const void *);
+}
+#endif
#include <inttypes.h>
#include <stdlib.h>
@@ -28,7 +40,6 @@
#include <stdio.h>
#include <wchar.h>
#include <math.h>
-#include <malloc.h>
#include <arpa/inet.h>
#include <dlfcn.h>
@@ -44,20 +55,40 @@
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/statvfs.h>
-#include <sys/sysinfo.h>
#include <sys/utsname.h>
#include <sys/mman.h>
-#include <sys/vfs.h>
#include <dirent.h>
#include <pwd.h>
#include <sys/socket.h>
#include <netdb.h>
#include <wordexp.h>
-#include <mntent.h>
-#include <netinet/ether.h>
#include <sys/ipc.h>
#include <sys/shm.h>
+#if !defined(__FreeBSD__)
+# include <malloc.h>
+# include <sys/sysinfo.h>
+# include <sys/vfs.h>
+# include <mntent.h>
+# include <netinet/ether.h>
+#else
+# include <signal.h>
+# include <netinet/in.h>
+# include <pthread_np.h>
+# include <sys/uio.h>
+# include <sys/mount.h>
+# include <sys/sysctl.h>
+# include <net/ethernet.h>
+# define f_namelen f_namemax // FreeBSD uses this name for the statfs field.
+# define cpu_set_t cpuset_t
+extern "C" {
+// FreeBSD's <ssp/string.h> defines mempcpy() to be a macro expanding into
+// a __builtin___mempcpy_chk() call, but since Msan RTL defines it as an
+// ordinary function, we can declare it here to complete the tests.
+void *mempcpy(void *dest, const void *src, size_t n);
+}
+#endif
+
#if defined(__i386__) || defined(__x86_64__)
# include <emmintrin.h>
# define MSAN_HAS_M128 1
@@ -69,7 +100,23 @@
# include <immintrin.h>
#endif
-static const size_t kPageSize = 4096;
+// On FreeBSD procfs is not enabled by default.
+#if defined(__FreeBSD__)
+# define FILE_TO_READ "/bin/cat"
+# define DIR_TO_READ "/bin"
+# define SUBFILE_TO_READ "cat"
+# define SYMLINK_TO_READ "/usr/bin/tar"
+# define SUPERUSER_GROUP "wheel"
+#else
+# define FILE_TO_READ "/proc/self/stat"
+# define DIR_TO_READ "/proc/self"
+# define SUBFILE_TO_READ "stat"
+# define SYMLINK_TO_READ "/proc/self/exe"
+# define SUPERUSER_GROUP "root"
+#endif
+
+const size_t kPageSize = 4096;
+const size_t kMaxPathLength = 4096;
typedef unsigned char U1;
typedef unsigned short U2; // NOLINT
@@ -87,9 +134,12 @@
__msan_set_origin(&x, sizeof(x), 0x1234);
U4 origin = __msan_get_origin(&x);
__msan_set_origin(&x, sizeof(x), 0);
- return origin == 0x1234;
+ return __msan_origin_is_descendant_or_same(origin, 0x1234);
}
+#define EXPECT_ORIGIN(expected, origin) \
+ EXPECT_TRUE(__msan_origin_is_descendant_or_same((origin), (expected)))
+
#define EXPECT_UMR(action) \
do { \
__msan_set_expect_umr(1); \
@@ -97,14 +147,13 @@
__msan_set_expect_umr(0); \
} while (0)
-#define EXPECT_UMR_O(action, origin) \
- do { \
- __msan_set_expect_umr(1); \
- action; \
- __msan_set_expect_umr(0); \
- if (TrackingOrigins()) \
- EXPECT_EQ(origin, __msan_get_umr_origin()); \
- } while (0)
+#define EXPECT_UMR_O(action, origin) \
+ do { \
+ __msan_set_expect_umr(1); \
+ action; \
+ __msan_set_expect_umr(0); \
+ if (TrackingOrigins()) EXPECT_ORIGIN(origin, __msan_get_umr_origin()); \
+ } while (0)
#define EXPECT_POISONED(x) ExpectPoisoned(x)
@@ -119,15 +168,14 @@
template<typename T>
void ExpectPoisonedWithOrigin(const T& t, unsigned origin) {
EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t)));
- if (TrackingOrigins())
- EXPECT_EQ(origin, __msan_get_origin((void*)&t));
+ if (TrackingOrigins()) EXPECT_ORIGIN(origin, __msan_get_origin((void *)&t));
}
-#define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x)
+#define EXPECT_NOT_POISONED(x) EXPECT_EQ(true, TestForNotPoisoned((x)))
template<typename T>
-void ExpectNotPoisoned(const T& t) {
- EXPECT_EQ(-1, __msan_test_shadow((void*)&t, sizeof(t)));
+bool TestForNotPoisoned(const T& t) {
+ return __msan_test_shadow((void*)&t, sizeof(t)) == -1;
}
static U8 poisoned_array[100];
@@ -494,10 +542,9 @@
TEST(MemorySanitizer, DynRet) {
ReturnPoisoned<S8>();
- EXPECT_NOT_POISONED(clearenv());
+ EXPECT_NOT_POISONED(atoi("0"));
}
-
TEST(MemorySanitizer, DynRet1) {
ReturnPoisoned<S8>();
}
@@ -552,7 +599,7 @@
TEST(MemorySanitizer, strerror_r) {
errno = 0;
char buf[1000];
- char *res = strerror_r(EINVAL, buf, sizeof(buf));
+ char *res = (char*) (size_t) strerror_r(EINVAL, buf, sizeof(buf));
ASSERT_EQ(0, errno);
if (!res) res = buf; // POSIX version success.
EXPECT_NOT_POISONED(strlen(res));
@@ -560,7 +607,7 @@
TEST(MemorySanitizer, fread) {
char *x = new char[32];
- FILE *f = fopen("/proc/self/stat", "r");
+ FILE *f = fopen(FILE_TO_READ, "r");
ASSERT_TRUE(f != NULL);
fread(x, 1, 32, f);
EXPECT_NOT_POISONED(x[0]);
@@ -572,7 +619,7 @@
TEST(MemorySanitizer, read) {
char *x = new char[32];
- int fd = open("/proc/self/stat", O_RDONLY);
+ int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = read(fd, x, 32);
ASSERT_EQ(sz, 32);
@@ -585,7 +632,7 @@
TEST(MemorySanitizer, pread) {
char *x = new char[32];
- int fd = open("/proc/self/stat", O_RDONLY);
+ int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = pread(fd, x, 32, 0);
ASSERT_EQ(sz, 32);
@@ -603,11 +650,11 @@
iov[0].iov_len = 5;
iov[1].iov_base = buf + 10;
iov[1].iov_len = 2000;
- int fd = open("/proc/self/stat", O_RDONLY);
+ int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = readv(fd, iov, 2);
ASSERT_GE(sz, 0);
- ASSERT_LT(sz, 5 + 2000);
+ ASSERT_LE(sz, 5 + 2000);
ASSERT_GT((size_t)sz, iov[0].iov_len);
EXPECT_POISONED(buf[0]);
EXPECT_NOT_POISONED(buf[1]);
@@ -627,11 +674,11 @@
iov[0].iov_len = 5;
iov[1].iov_base = buf + 10;
iov[1].iov_len = 2000;
- int fd = open("/proc/self/stat", O_RDONLY);
+ int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = preadv(fd, iov, 2, 3);
ASSERT_GE(sz, 0);
- ASSERT_LT(sz, 5 + 2000);
+ ASSERT_LE(sz, 5 + 2000);
ASSERT_GT((size_t)sz, iov[0].iov_len);
EXPECT_POISONED(buf[0]);
EXPECT_NOT_POISONED(buf[1]);
@@ -653,15 +700,14 @@
TEST(MemorySanitizer, readlink) {
char *x = new char[1000];
- readlink("/proc/self/exe", x, 1000);
+ readlink(SYMLINK_TO_READ, x, 1000);
EXPECT_NOT_POISONED(x[0]);
delete [] x;
}
-
TEST(MemorySanitizer, stat) {
struct stat* st = new struct stat;
- int res = stat("/proc/self/stat", st);
+ int res = stat(FILE_TO_READ, st);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(st->st_dev);
EXPECT_NOT_POISONED(st->st_mode);
@@ -670,9 +716,9 @@
TEST(MemorySanitizer, fstatat) {
struct stat* st = new struct stat;
- int dirfd = open("/proc/self", O_RDONLY);
+ int dirfd = open(DIR_TO_READ, O_RDONLY);
ASSERT_GT(dirfd, 0);
- int res = fstatat(dirfd, "stat", st, 0);
+ int res = fstatat(dirfd, SUBFILE_TO_READ, st, 0);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(st->st_dev);
EXPECT_NOT_POISONED(st->st_mode);
@@ -764,6 +810,8 @@
close(pipefd[1]);
}
+// There is no ppoll() on FreeBSD.
+#if !defined (__FreeBSD__)
TEST(MemorySanitizer, ppoll) {
int* pipefd = new int[2];
int res = pipe(pipefd);
@@ -788,6 +836,7 @@
close(pipefd[0]);
close(pipefd[1]);
}
+#endif
TEST(MemorySanitizer, poll_positive) {
int* pipefd = new int[2];
@@ -852,8 +901,11 @@
res = fcntl(connect_socket, F_SETFL, O_NONBLOCK);
ASSERT_EQ(0, res);
res = connect(connect_socket, (struct sockaddr *)&sai, sizeof(sai));
- ASSERT_EQ(-1, res);
- ASSERT_EQ(EINPROGRESS, errno);
+ // On FreeBSD this connection completes immediately.
+ if (res != 0) {
+ ASSERT_EQ(-1, res);
+ ASSERT_EQ(EINPROGRESS, errno);
+ }
__msan_poison(&sai, sizeof(sai));
int new_sock = accept(listen_socket, (struct sockaddr *)&sai, &sz);
@@ -974,7 +1026,6 @@
ASSERT_EQ(0, res);
ASSERT_EQ(sizeof(client_sai), sz);
-
const char *s = "message text";
struct iovec iov;
iov.iov_base = (void *)s;
@@ -1126,12 +1177,15 @@
free(res);
}
+// There's no get_current_dir_name() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, get_current_dir_name) {
char* res = get_current_dir_name();
ASSERT_TRUE(res != NULL);
EXPECT_NOT_POISONED(res[0]);
free(res);
}
+#endif
TEST(MemorySanitizer, shmctl) {
int id = shmget(IPC_PRIVATE, 4096, 0644 | IPC_CREAT);
@@ -1142,6 +1196,8 @@
ASSERT_GT(res, -1);
EXPECT_NOT_POISONED(ds);
+ // FreeBSD does not support shmctl(IPC_INFO) and shmctl(SHM_INFO).
+#if !defined(__FreeBSD__)
struct shminfo si;
res = shmctl(id, IPC_INFO, (struct shmid_ds *)&si);
ASSERT_GT(res, -1);
@@ -1151,6 +1207,7 @@
res = shmctl(id, SHM_INFO, (struct shmid_ds *)&s_i);
ASSERT_GT(res, -1);
EXPECT_NOT_POISONED(s_i);
+#endif
res = shmctl(id, IPC_RMID, 0);
ASSERT_GT(res, -1);
@@ -1158,7 +1215,7 @@
TEST(MemorySanitizer, shmat) {
void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, p);
((char *)p)[10] = *GetPoisoned<U1>();
@@ -1184,6 +1241,8 @@
ASSERT_GT(res, -1);
}
+// There's no random_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, random_r) {
int32_t x;
char z[64];
@@ -1199,6 +1258,7 @@
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(x);
}
+#endif
TEST(MemorySanitizer, confstr) {
char buf[3];
@@ -1216,6 +1276,16 @@
ASSERT_EQ(res, strlen(buf2) + 1);
}
+TEST(MemorySanitizer, opendir) {
+ DIR *dir = opendir(".");
+ closedir(dir);
+
+ char name[10] = ".";
+ __msan_poison(name, sizeof(name));
+ EXPECT_UMR(dir = opendir(name));
+ closedir(dir);
+}
+
TEST(MemorySanitizer, readdir) {
DIR *dir = opendir(".");
struct dirent *d = readdir(dir);
@@ -1252,6 +1322,8 @@
free(res);
}
+// There's no canonicalize_file_name() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, canonicalize_file_name) {
const char* relpath = ".";
char* res = canonicalize_file_name(relpath);
@@ -1259,6 +1331,7 @@
EXPECT_NOT_POISONED(res[0]);
free(res);
}
+#endif
extern char **environ;
@@ -1539,55 +1612,74 @@
EXPECT_POISONED(a[7]);
}
-#define TEST_STRTO_INT(func_name) \
- TEST(MemorySanitizer, func_name) { \
- char *e; \
- EXPECT_EQ(1U, func_name("1", &e, 10)); \
- EXPECT_NOT_POISONED((S8)e); \
+#define TEST_STRTO_INT(func_name, char_type, str_prefix) \
+ TEST(MemorySanitizer, func_name) { \
+ char_type *e; \
+ EXPECT_EQ(1U, func_name(str_prefix##"1", &e, 10)); \
+ EXPECT_NOT_POISONED((S8)e); \
}
-#define TEST_STRTO_FLOAT(func_name) \
- TEST(MemorySanitizer, func_name) { \
- char *e; \
- EXPECT_NE(0, func_name("1.5", &e)); \
- EXPECT_NOT_POISONED((S8)e); \
+#define TEST_STRTO_FLOAT(func_name, char_type, str_prefix) \
+ TEST(MemorySanitizer, func_name) { \
+ char_type *e; \
+ EXPECT_NE(0, func_name(str_prefix##"1.5", &e)); \
+ EXPECT_NOT_POISONED((S8)e); \
}
-#define TEST_STRTO_FLOAT_LOC(func_name) \
+#define TEST_STRTO_FLOAT_LOC(func_name, char_type, str_prefix) \
TEST(MemorySanitizer, func_name) { \
locale_t loc = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); \
- char *e; \
- EXPECT_NE(0, func_name("1.5", &e, loc)); \
+ char_type *e; \
+ EXPECT_NE(0, func_name(str_prefix##"1.5", &e, loc)); \
EXPECT_NOT_POISONED((S8)e); \
freelocale(loc); \
}
-#define TEST_STRTO_INT_LOC(func_name) \
+#define TEST_STRTO_INT_LOC(func_name, char_type, str_prefix) \
TEST(MemorySanitizer, func_name) { \
locale_t loc = newlocale(LC_NUMERIC_MASK, "C", (locale_t)0); \
- char *e; \
- ASSERT_EQ(1U, func_name("1", &e, 10, loc)); \
+ char_type *e; \
+ ASSERT_EQ(1U, func_name(str_prefix##"1", &e, 10, loc)); \
EXPECT_NOT_POISONED((S8)e); \
freelocale(loc); \
}
-TEST_STRTO_INT(strtol)
-TEST_STRTO_INT(strtoll)
-TEST_STRTO_INT(strtoul)
-TEST_STRTO_INT(strtoull)
+TEST_STRTO_INT(strtol, char, )
+TEST_STRTO_INT(strtoll, char, )
+TEST_STRTO_INT(strtoul, char, )
+TEST_STRTO_INT(strtoull, char, )
-TEST_STRTO_FLOAT(strtof)
-TEST_STRTO_FLOAT(strtod)
-TEST_STRTO_FLOAT(strtold)
+TEST_STRTO_FLOAT(strtof, char, )
+TEST_STRTO_FLOAT(strtod, char, )
+TEST_STRTO_FLOAT(strtold, char, )
-TEST_STRTO_FLOAT_LOC(strtof_l)
-TEST_STRTO_FLOAT_LOC(strtod_l)
-TEST_STRTO_FLOAT_LOC(strtold_l)
+TEST_STRTO_FLOAT_LOC(strtof_l, char, )
+TEST_STRTO_FLOAT_LOC(strtod_l, char, )
+TEST_STRTO_FLOAT_LOC(strtold_l, char, )
-TEST_STRTO_INT_LOC(strtol_l)
-TEST_STRTO_INT_LOC(strtoll_l)
-TEST_STRTO_INT_LOC(strtoul_l)
-TEST_STRTO_INT_LOC(strtoull_l)
+TEST_STRTO_INT_LOC(strtol_l, char, )
+TEST_STRTO_INT_LOC(strtoll_l, char, )
+TEST_STRTO_INT_LOC(strtoul_l, char, )
+TEST_STRTO_INT_LOC(strtoull_l, char, )
+
+TEST_STRTO_INT(wcstol, wchar_t, L)
+TEST_STRTO_INT(wcstoll, wchar_t, L)
+TEST_STRTO_INT(wcstoul, wchar_t, L)
+TEST_STRTO_INT(wcstoull, wchar_t, L)
+
+TEST_STRTO_FLOAT(wcstof, wchar_t, L)
+TEST_STRTO_FLOAT(wcstod, wchar_t, L)
+TEST_STRTO_FLOAT(wcstold, wchar_t, L)
+
+TEST_STRTO_FLOAT_LOC(wcstof_l, wchar_t, L)
+TEST_STRTO_FLOAT_LOC(wcstod_l, wchar_t, L)
+TEST_STRTO_FLOAT_LOC(wcstold_l, wchar_t, L)
+
+TEST_STRTO_INT_LOC(wcstol_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoll_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoul_l, wchar_t, L)
+TEST_STRTO_INT_LOC(wcstoull_l, wchar_t, L)
+
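
For reference, one of the wide-character instantiations above expands to roughly the following test; the str_prefix argument pastes the L prefix onto the string literal (illustrative expansion, not an extra test):

// TEST_STRTO_INT(wcstol, wchar_t, L) expands to approximately:
TEST(MemorySanitizer, wcstol) {
  wchar_t *e;
  EXPECT_EQ(1U, wcstol(L"1", &e, 10));
  EXPECT_NOT_POISONED((S8)e);  // the interceptor must unpoison the endptr output
}
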
TEST(MemorySanitizer, strtoimax) {
char *e;
@@ -1603,12 +1695,20 @@
#ifdef __GLIBC__
extern "C" float __strtof_l(const char *nptr, char **endptr, locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtof_l)
+TEST_STRTO_FLOAT_LOC(__strtof_l, char, )
extern "C" double __strtod_l(const char *nptr, char **endptr, locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtod_l)
+TEST_STRTO_FLOAT_LOC(__strtod_l, char, )
extern "C" long double __strtold_l(const char *nptr, char **endptr,
locale_t loc);
-TEST_STRTO_FLOAT_LOC(__strtold_l)
+TEST_STRTO_FLOAT_LOC(__strtold_l, char, )
+
+extern "C" float __wcstof_l(const wchar_t *nptr, wchar_t **endptr, locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstof_l, wchar_t, L)
+extern "C" double __wcstod_l(const wchar_t *nptr, wchar_t **endptr, locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstod_l, wchar_t, L)
+extern "C" long double __wcstold_l(const wchar_t *nptr, wchar_t **endptr,
+ locale_t loc);
+TEST_STRTO_FLOAT_LOC(__wcstold_l, wchar_t, L)
#endif // __GLIBC__
TEST(MemorySanitizer, modf) {
@@ -1629,26 +1729,35 @@
EXPECT_NOT_POISONED(y);
}
+// There's no sincos() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincos) {
double s, c;
sincos(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
+#endif
+// There's no sincosf() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincosf) {
float s, c;
sincosf(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
+#endif
+// There's no sincosl() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincosl) {
long double s, c;
sincosl(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
+#endif
TEST(MemorySanitizer, remquo) {
int quo;
@@ -1703,13 +1812,18 @@
EXPECT_NOT_POISONED(sgn);
}
+// There's no lgammal_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, lgammal_r) {
int sgn;
long double res = lgammal_r(1.1, &sgn);
ASSERT_NE(0.0, res);
EXPECT_NOT_POISONED(sgn);
}
+#endif
+// There's no drand48_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, drand48_r) {
struct drand48_data buf;
srand48_r(0, &buf);
@@ -1717,7 +1831,10 @@
drand48_r(&buf, &d);
EXPECT_NOT_POISONED(d);
}
+#endif
+// There's no lrand48_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, lrand48_r) {
struct drand48_data buf;
srand48_r(0, &buf);
@@ -1725,6 +1842,7 @@
lrand48_r(&buf, &d);
EXPECT_NOT_POISONED(d);
}
+#endif
TEST(MemorySanitizer, sprintf) { // NOLINT
char buff[10];
@@ -1834,6 +1952,16 @@
EXPECT_POISONED(buff[2]);
}
+TEST(MemorySanitizer, wmemset) {
+ wchar_t x[25];
+ break_optimization(x);
+ EXPECT_POISONED(x[0]);
+ wmemset(x, L'A', 10);
+ EXPECT_EQ(x[0], L'A');
+ EXPECT_EQ(x[9], L'A');
+ EXPECT_POISONED(x[10]);
+}
+
TEST(MemorySanitizer, mbtowc) {
const char *x = "abc";
wchar_t wx;
@@ -1979,6 +2107,8 @@
EXPECT_NE(0U, strlen(time.tm_zone));
}
+// There's no getmntent() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, getmntent) {
FILE *fp = setmntent("/etc/fstab", "r");
struct mntent *mnt = getmntent(fp);
@@ -1991,7 +2121,10 @@
EXPECT_NOT_POISONED(mnt->mnt_passno);
fclose(fp);
}
+#endif
+// There's no getmntent_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, getmntent_r) {
FILE *fp = setmntent("/etc/fstab", "r");
struct mntent mntbuf;
@@ -2006,6 +2139,7 @@
EXPECT_NOT_POISONED(mnt->mnt_passno);
fclose(fp);
}
+#endif
TEST(MemorySanitizer, ether) {
const char *asc = "11:22:33:44:55:66";
@@ -2045,6 +2179,8 @@
}
}
+// There's no fcvt() on FreeBSD.
+#if !defined(__FreeBSD__)
// FIXME: enable and add ecvt.
// FIXME: check why msandr does not handle fcvt.
TEST(MemorySanitizer, fcvt) {
@@ -2060,7 +2196,10 @@
EXPECT_NOT_POISONED(str[0]);
ASSERT_NE(0U, strlen(str));
}
+#endif
+// There's no fcvt_long() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fcvt_long) {
int a, b;
break_optimization(&a);
@@ -2074,7 +2213,7 @@
EXPECT_NOT_POISONED(str[0]);
ASSERT_NE(0U, strlen(str));
}
-
+#endif
TEST(MemorySanitizer, memchr) {
char x[10];
@@ -2676,9 +2815,20 @@
EXPECT_NOT_POISONED(usage.ru_nivcsw);
}
-#ifdef __GLIBC__
-extern char *program_invocation_name;
-#else // __GLIBC__
+#if defined(__FreeBSD__)
+static void GetProgramPath(char *buf, size_t sz) {
+ int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ int res = sysctl(mib, 4, buf, &sz, NULL, 0);
+ ASSERT_EQ(0, res);
+}
+#elif defined(__GLIBC__)
+static void GetProgramPath(char *buf, size_t sz) {
+ extern char *program_invocation_name;
+ int res = snprintf(buf, sz, "%s", program_invocation_name);
+ ASSERT_GE(res, 0);
+ ASSERT_LT((size_t)res, sz);
+}
+#else
# error "TODO: port this"
#endif
@@ -2713,21 +2863,29 @@
// Compute the path to our loadable DSO. We assume it's in the same
// directory. Only use string routines that we intercept so far to do this.
-static int PathToLoadable(char *buf, size_t sz) {
- const char *basename = "libmsan_loadable.x86_64.so";
- char *argv0 = program_invocation_name;
- char *last_slash = strrchr(argv0, '/');
- assert(last_slash);
- int res =
- snprintf(buf, sz, "%.*s/%s", int(last_slash - argv0), argv0, basename);
- assert(res >= 0);
- return (size_t)res < sz ? 0 : res;
+static void GetPathToLoadable(char *buf, size_t sz) {
+ char program_path[kMaxPathLength];
+ GetProgramPath(program_path, sizeof(program_path));
+
+ const char *last_slash = strrchr(program_path, '/');
+ ASSERT_NE(nullptr, last_slash);
+ size_t dir_len = (size_t)(last_slash - program_path);
+#if defined(__x86_64__)
+ static const char basename[] = "libmsan_loadable.x86_64.so";
+#elif defined(__MIPSEB__) || defined(MIPSEB)
+ static const char basename[] = "libmsan_loadable.mips64.so";
+#elif defined(__mips64)
+ static const char basename[] = "libmsan_loadable.mips64el.so";
+#endif
+ int res = snprintf(buf, sz, "%.*s/%s",
+ (int)dir_len, program_path, basename);
+ ASSERT_GE(res, 0);
+ ASSERT_LT((size_t)res, sz);
}
TEST(MemorySanitizer, dl_iterate_phdr) {
- char path[4096];
- int res = PathToLoadable(path, sizeof(path));
- ASSERT_EQ(0, res);
+ char path[kMaxPathLength];
+ GetPathToLoadable(path, sizeof(path));
// Having at least one dlopen'ed library in the process makes this more
// entertaining.
@@ -2737,15 +2895,13 @@
int count = 0;
int result = dl_iterate_phdr(dl_phdr_callback, &count);
ASSERT_GT(count, 0);
-
+
dlclose(lib);
}
-
TEST(MemorySanitizer, dlopen) {
- char path[4096];
- int res = PathToLoadable(path, sizeof(path));
- ASSERT_EQ(0, res);
+ char path[kMaxPathLength];
+ GetPathToLoadable(path, sizeof(path));
// We need to clear shadow for globals when doing dlopen. In order to test
// this, we have to poison the shadow for the DSO before we load it. In
@@ -2770,19 +2926,22 @@
// Regression test for a crash in dlopen() interceptor.
TEST(MemorySanitizer, dlopenFailed) {
- const char *path = "/libmsan_loadable_does_not_exist.x86_64.so";
+ const char *path = "/libmsan_loadable_does_not_exist.so";
void *lib = dlopen(path, RTLD_LAZY);
ASSERT_TRUE(lib == NULL);
}
#endif // MSAN_TEST_DISABLE_DLOPEN
+// There's no sched_getaffinity() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sched_getaffinity) {
cpu_set_t mask;
int res = sched_getaffinity(getpid(), sizeof(mask), &mask);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(mask);
}
+#endif
TEST(MemorySanitizer, scanf) {
const char *input = "42 hello";
@@ -3012,11 +3171,14 @@
free(p);
}
+// There's no memalign() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, memalign) {
void *p = memalign(4096, 13);
EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
free(p);
}
+#endif
TEST(MemorySanitizer, valloc) {
void *a = valloc(100);
@@ -3024,6 +3186,8 @@
free(a);
}
+// There's no pvalloc() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, pvalloc) {
void *p = pvalloc(kPageSize + 100);
EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
@@ -3035,6 +3199,7 @@
EXPECT_EQ(kPageSize, __sanitizer_get_allocated_size(p));
free(p);
}
+#endif
TEST(MemorySanitizer, inet_pton) {
const char *s = "1:0:0:0:0:0:0:8";
@@ -3078,12 +3243,15 @@
EXPECT_NOT_POISONED(strlen(buf));
}
+// There's no sysinfo() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sysinfo) {
struct sysinfo info;
int res = sysinfo(&info);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(info);
}
+#endif
TEST(MemorySanitizer, getpwuid) {
struct passwd *p = getpwuid(0); // root
@@ -3138,8 +3306,10 @@
struct group grp;
struct group *grpres;
char buf[10000];
- int res = getgrnam_r("root", &grp, buf, sizeof(buf), &grpres);
+ int res = getgrnam_r(SUPERUSER_GROUP, &grp, buf, sizeof(buf), &grpres);
ASSERT_EQ(0, res);
+ // Note that getgrnam_r() returns 0 if the matching group is not found.
+ ASSERT_NE(nullptr, grpres);
EXPECT_NOT_POISONED(grp.gr_name);
ASSERT_TRUE(grp.gr_name != NULL);
EXPECT_NOT_POISONED(grp.gr_name[0]);
@@ -3171,6 +3341,8 @@
EXPECT_NOT_POISONED(pwdres);
}
+// There's no fgetpwent() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetpwent) {
FILE *fp = fopen("/etc/passwd", "r");
struct passwd *p = fgetpwent(fp);
@@ -3181,6 +3353,7 @@
EXPECT_NOT_POISONED(p->pw_uid);
fclose(fp);
}
+#endif
TEST(MemorySanitizer, getgrent) {
setgrent();
@@ -3192,6 +3365,8 @@
EXPECT_NOT_POISONED(p->gr_gid);
}
+// There's no fgetgrent() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetgrent) {
FILE *fp = fopen("/etc/group", "r");
struct group *grp = fgetgrent(fp);
@@ -3206,6 +3381,7 @@
}
fclose(fp);
}
+#endif
TEST(MemorySanitizer, getgrent_r) {
struct group grp;
@@ -3221,6 +3397,8 @@
EXPECT_NOT_POISONED(grpres);
}
+// There's no fgetgrent_r() on FreeBSD.
+#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetgrent_r) {
FILE *fp = fopen("/etc/group", "r");
struct group grp;
@@ -3236,6 +3414,7 @@
EXPECT_NOT_POISONED(grpres);
fclose(fp);
}
+#endif
TEST(MemorySanitizer, getgroups) {
int n = getgroups(0, 0);
@@ -3363,7 +3542,7 @@
}
TEST(MemorySanitizer, UnalignedLoad) {
- char x[32];
+ char x[32] __attribute__((aligned(8)));
U4 origin = __LINE__;
for (unsigned i = 0; i < sizeof(x) / 4; ++i)
__msan_set_origin(x + 4 * i, 4, origin + i);
@@ -3397,7 +3576,7 @@
}
TEST(MemorySanitizer, UnalignedStore16) {
- char x[5];
+ char x[5] __attribute__((aligned(4)));
U2 y2 = 0;
U4 origin = __LINE__;
__msan_poison(&y2, 1);
@@ -3408,11 +3587,10 @@
EXPECT_POISONED_O(x[1], origin);
EXPECT_NOT_POISONED(x[2]);
EXPECT_POISONED_O(x[3], origin);
- EXPECT_POISONED_O(x[4], origin);
}
TEST(MemorySanitizer, UnalignedStore32) {
- char x[8];
+ char x[8] __attribute__((aligned(4)));
U4 y4 = 0;
U4 origin = __LINE__;
__msan_poison(&y4, 2);
@@ -3430,7 +3608,7 @@
}
TEST(MemorySanitizer, UnalignedStore64) {
- char x[16];
+ char x[16] __attribute__((aligned(8)));
U8 y8 = 0;
U4 origin = __LINE__;
__msan_poison(&y8, 3);
@@ -3453,7 +3631,7 @@
}
TEST(MemorySanitizer, UnalignedStore16_precise) {
- char x[8];
+ char x[8] __attribute__((aligned(4)));
U2 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@@ -3476,7 +3654,7 @@
}
TEST(MemorySanitizer, UnalignedStore16_precise2) {
- char x[8];
+ char x[8] __attribute__((aligned(4)));
U2 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@@ -3499,7 +3677,7 @@
}
TEST(MemorySanitizer, UnalignedStore64_precise) {
- char x[12];
+ char x[12] __attribute__((aligned(8)));
U8 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@@ -3531,7 +3709,7 @@
}
TEST(MemorySanitizer, UnalignedStore64_precise2) {
- char x[12];
+ char x[12] __attribute__((aligned(8)));
U8 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@@ -3561,7 +3739,7 @@
EXPECT_POISONED_O(x[11], originx3);
}
-#if defined(__clang__)
+#if (defined(__x86_64__) && defined(__clang__))
namespace {
typedef U1 V16x8 __attribute__((__vector_size__(16)));
typedef U2 V8x16 __attribute__((__vector_size__(16)));
@@ -3725,15 +3903,15 @@
#endif // defined(__clang__)
TEST(MemorySanitizerOrigins, SetGet) {
- EXPECT_EQ(TrackingOrigins(), __msan_get_track_origins());
+ EXPECT_EQ(TrackingOrigins(), !!__msan_get_track_origins());
if (!TrackingOrigins()) return;
int x;
__msan_set_origin(&x, sizeof(x), 1234);
- EXPECT_EQ(1234U, __msan_get_origin(&x));
+ EXPECT_ORIGIN(1234U, __msan_get_origin(&x));
__msan_set_origin(&x, sizeof(x), 5678);
- EXPECT_EQ(5678U, __msan_get_origin(&x));
+ EXPECT_ORIGIN(5678U, __msan_get_origin(&x));
__msan_set_origin(&x, sizeof(x), 0);
- EXPECT_EQ(0U, __msan_get_origin(&x));
+ EXPECT_ORIGIN(0U, __msan_get_origin(&x));
}
namespace {
@@ -3743,19 +3921,18 @@
U2 b;
};
-// http://code.google.com/p/memory-sanitizer/issues/detail?id=6
-TEST(MemorySanitizerOrigins, DISABLED_InitializedStoreDoesNotChangeOrigin) {
+TEST(MemorySanitizerOrigins, InitializedStoreDoesNotChangeOrigin) {
if (!TrackingOrigins()) return;
S s;
U4 origin = rand(); // NOLINT
s.a = *GetPoisonedO<U2>(0, origin);
- EXPECT_EQ(origin, __msan_get_origin(&s.a));
- EXPECT_EQ(origin, __msan_get_origin(&s.b));
+ EXPECT_ORIGIN(origin, __msan_get_origin(&s.a));
+ EXPECT_ORIGIN(origin, __msan_get_origin(&s.b));
s.b = 42;
- EXPECT_EQ(origin, __msan_get_origin(&s.a));
- EXPECT_EQ(origin, __msan_get_origin(&s.b));
+ EXPECT_ORIGIN(origin, __msan_get_origin(&s.a));
+ EXPECT_ORIGIN(origin, __msan_get_origin(&s.b));
}
} // namespace
@@ -3771,7 +3948,8 @@
*z = op(*x, *y);
U4 origin = __msan_get_origin(z);
EXPECT_POISONED_O(*z, origin);
- EXPECT_EQ(true, origin == ox || origin == oy);
+ EXPECT_EQ(true, __msan_origin_is_descendant_or_same(origin, ox) ||
+ __msan_origin_is_descendant_or_same(origin, oy));
// y is poisoned, x is not.
*x = 10101;
@@ -3780,7 +3958,7 @@
__msan_set_origin(z, sizeof(*z), 0);
*z = op(*x, *y);
EXPECT_POISONED_O(*z, oy);
- EXPECT_EQ(__msan_get_origin(z), oy);
+ EXPECT_ORIGIN(oy, __msan_get_origin(z));
// x is poisoned, y is not.
*x = *GetPoisonedO<T>(0, ox);
@@ -3789,7 +3967,7 @@
__msan_set_origin(z, sizeof(*z), 0);
*z = op(*x, *y);
EXPECT_POISONED_O(*z, ox);
- EXPECT_EQ(__msan_get_origin(z), ox);
+ EXPECT_ORIGIN(ox, __msan_get_origin(z));
}
template<class T> INLINE T XOR(const T &a, const T&b) { return a ^ b; }
@@ -4081,7 +4259,8 @@
// Allocate the page that was released to the OS in free() with the real mmap,
// bypassing the interceptor.
- char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE((char *)0, q);
ASSERT_TRUE(q <= p);
diff --git a/lib/msan/tests/msandr_test_so.cc b/lib/msan/tests/msandr_test_so.cc
deleted file mode 100644
index eb605d4..0000000
--- a/lib/msan/tests/msandr_test_so.cc
+++ /dev/null
@@ -1,38 +0,0 @@
-//===-- msandr_test_so.cc ------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// MemorySanitizer unit tests.
-//===----------------------------------------------------------------------===//
-
-#include "msandr_test_so.h"
-
-void dso_memfill(char* s, unsigned n) {
- for (unsigned i = 0; i < n; ++i)
- s[i] = i;
-}
-
-int dso_callfn(int (*fn)(void)) {
- volatile int x = fn();
- return x;
-}
-
-int dso_callfn1(int (*fn)(long long, long long, long long)) { //NOLINT
- volatile int x = fn(1, 2, 3);
- return x;
-}
-
-int dso_stack_store(void (*fn)(int*, int*), int x) {
- int y = x + 1;
- fn(&x, &y);
- return y;
-}
-
-void break_optimization(void *x) {}
diff --git a/lib/msan/tests/msandr_test_so.h b/lib/msan/tests/msandr_test_so.h
deleted file mode 100644
index cd75ff3..0000000
--- a/lib/msan/tests/msandr_test_so.h
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- msandr_test_so.h ----------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// MemorySanitizer unit tests.
-//===----------------------------------------------------------------------===//
-
-#ifndef MSANDR_MSANDR_TEST_SO_H
-#define MSANDR_MSANDR_TEST_SO_H
-
-void dso_memfill(char* s, unsigned n);
-int dso_callfn(int (*fn)(void));
-int dso_callfn1(int (*fn)(long long, long long, long long)); //NOLINT
-int dso_stack_store(void (*fn)(int*, int*), int x);
-void break_optimization(void *x);
-
-#endif
diff --git a/lib/profile/InstrProfiling.h b/lib/profile/InstrProfiling.h
index a086f3d..2b1bd00 100644
--- a/lib/profile/InstrProfiling.h
+++ b/lib/profile/InstrProfiling.h
@@ -57,9 +57,6 @@
uint64_t *__llvm_profile_begin_counters(void);
uint64_t *__llvm_profile_end_counters(void);
-#define PROFILE_RANGE_SIZE(Range) \
- (__llvm_profile_end_ ## Range () - __llvm_profile_begin_ ## Range ())
-
/*!
* \brief Write instrumentation data to the current file.
*
diff --git a/lib/profile/InstrProfilingBuffer.c b/lib/profile/InstrProfilingBuffer.c
index 3351b07..3c429c8 100644
--- a/lib/profile/InstrProfilingBuffer.c
+++ b/lib/profile/InstrProfilingBuffer.c
@@ -8,17 +8,38 @@
\*===----------------------------------------------------------------------===*/
#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
#include <string.h>
__attribute__((visibility("hidden")))
uint64_t __llvm_profile_get_size_for_buffer(void) {
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *NamesBegin = __llvm_profile_begin_names();
+ const char *NamesEnd = __llvm_profile_end_names();
+
+ return __llvm_profile_get_size_for_buffer_internal(
+ DataBegin, DataEnd, CountersBegin, CountersEnd, NamesBegin, NamesEnd);
+}
+
+#define PROFILE_RANGE_SIZE(Range) (Range##End - Range##Begin)
+
+__attribute__((visibility("hidden")))
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+ const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd, const char *NamesBegin,
+ const char *NamesEnd) {
/* Match logic in __llvm_profile_write_buffer(). */
- const uint64_t NamesSize = PROFILE_RANGE_SIZE(names) * sizeof(char);
+ const uint64_t NamesSize = PROFILE_RANGE_SIZE(Names) * sizeof(char);
const uint64_t Padding = sizeof(uint64_t) - NamesSize % sizeof(uint64_t);
return sizeof(uint64_t) * PROFILE_HEADER_SIZE +
- PROFILE_RANGE_SIZE(data) * sizeof(__llvm_profile_data) +
- PROFILE_RANGE_SIZE(counters) * sizeof(uint64_t) +
- NamesSize + Padding;
+ PROFILE_RANGE_SIZE(Data) * sizeof(__llvm_profile_data) +
+ PROFILE_RANGE_SIZE(Counters) * sizeof(uint64_t) +
+ NamesSize + Padding;
}
__attribute__((visibility("hidden")))
@@ -33,6 +54,20 @@
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
+ return __llvm_profile_write_buffer_internal(Buffer, DataBegin, DataEnd,
+ CountersBegin, CountersEnd,
+ NamesBegin, NamesEnd);
+}
+
+__attribute__((visibility("hidden")))
+int __llvm_profile_write_buffer_internal(
+ char *Buffer, const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+ /* Match logic in __llvm_profile_get_size_for_buffer().
+ * Match logic in __llvm_profile_write_file().
+ */
+
/* Calculate size of sections. */
const uint64_t DataSize = DataEnd - DataBegin;
const uint64_t CountersSize = CountersEnd - CountersBegin;
diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c
index 5aef390..daa3094 100644
--- a/lib/profile/InstrProfilingFile.c
+++ b/lib/profile/InstrProfilingFile.c
@@ -11,6 +11,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/errno.h>
#define UNCONST(ptr) ((void *)(uintptr_t)(ptr))
@@ -84,12 +85,15 @@
}
static void truncateCurrentFile(void) {
- const char *Filename = __llvm_profile_CurrentFilename;
+ const char *Filename;
+ FILE *File;
+
+ Filename = __llvm_profile_CurrentFilename;
if (!Filename || !Filename[0])
return;
/* Truncate the file. Later we'll reopen and append. */
- FILE *File = fopen(Filename, "w");
+ File = fopen(Filename, "w");
if (!File)
return;
fclose(File);
@@ -100,15 +104,16 @@
int getpid(void);
static int setFilenameFromEnvironment(void) {
const char *Filename = getenv("LLVM_PROFILE_FILE");
+#define MAX_PID_SIZE 16
+ char PidChars[MAX_PID_SIZE] = {0};
+ int NumPids = 0, PidLength = 0;
+ char *Allocated;
+ int I, J;
+
if (!Filename || !Filename[0])
return -1;
/* Check the filename for "%p", which indicates a pid-substitution. */
-#define MAX_PID_SIZE 16
- char PidChars[MAX_PID_SIZE] = {0};
- int NumPids = 0;
- int PidLength = 0;
- int I;
for (I = 0; Filename[I]; ++I)
if (Filename[I] == '%' && Filename[++I] == 'p')
if (!NumPids++) {
@@ -122,12 +127,11 @@
}
/* Allocate enough space for the substituted filename. */
- char *Allocated = (char*)malloc(I + NumPids*(PidLength - 2) + 1);
+ Allocated = malloc(I + NumPids*(PidLength - 2) + 1);
if (!Allocated)
return -1;
/* Construct the new filename. */
- int J;
for (I = 0, J = 0; Filename[I]; ++I)
if (Filename[I] == '%') {
if (Filename[++I] == 'p') {
@@ -170,12 +174,18 @@
__attribute__((visibility("hidden")))
int __llvm_profile_write_file(void) {
+ int rc;
+
/* Check the filename. */
if (!__llvm_profile_CurrentFilename)
return -1;
/* Write the file. */
- return writeFileWithName(__llvm_profile_CurrentFilename);
+ rc = writeFileWithName(__llvm_profile_CurrentFilename);
+ if (rc && getenv("LLVM_PROFILE_VERBOSE_ERRORS"))
+ fprintf(stderr, "LLVM Profile: Failed to write file \"%s\": %s\n",
+ __llvm_profile_CurrentFilename, strerror(errno));
+ return rc;
}
static void writeFileWithoutReturn(void) {
diff --git a/lib/profile/InstrProfilingInternal.h b/lib/profile/InstrProfilingInternal.h
new file mode 100644
index 0000000..ede39cd
--- /dev/null
+++ b/lib/profile/InstrProfilingInternal.h
@@ -0,0 +1,40 @@
+/*===- InstrProfilingInternal.h - Support library for PGO instrumentation ===*\
+|*
+|* The LLVM Compiler Infrastructure
+|*
+|* This file is distributed under the University of Illinois Open Source
+|* License. See LICENSE.TXT for details.
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILING_INTERNALH_
+#define PROFILE_INSTRPROFILING_INTERNALH_
+
+#include "InstrProfiling.h"
+
+/*!
+ * \brief Get the size of the buffer needed to hold the instrumentation data,
+ * given explicit pointers to the live data in memory. This function is
+ * probably not what you want. Use __llvm_profile_get_size_for_buffer instead.
+ * Use this function if your program has a custom memory layout.
+ */
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+ const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
+ const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ const char *NamesBegin, const char *NamesEnd);
+
+/*!
+ * \brief Write instrumentation data to the given buffer, given explicit
+ * pointers to the live data in memory. This function is probably not what you
+ * want. Use __llvm_profile_write_buffer instead. Use this function if your
+ * program has a custom memory layout.
+ *
+ * \pre \c Buffer is the start of a buffer at least as big as \a
+ * __llvm_profile_get_size_for_buffer_internal().
+ */
+int __llvm_profile_write_buffer_internal(
+ char *Buffer, const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+
+#endif
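The public counterparts of these declarations, __llvm_profile_get_size_for_buffer() and __llvm_profile_write_buffer() in InstrProfiling.h, pick up the section boundaries themselves (see the InstrProfilingBuffer.c hunk above). A hedged sketch of the usual caller-side pattern, assuming the customary zero-on-success return of the write call:

    // Sketch: serialize the current profile into a caller-owned heap buffer.
    #include <stdint.h>
    #include <stdlib.h>

    extern "C" uint64_t __llvm_profile_get_size_for_buffer(void);
    extern "C" int __llvm_profile_write_buffer(char *Buffer);

    char *DumpProfileToBuffer(uint64_t *Size) {
      *Size = __llvm_profile_get_size_for_buffer();  // matches the writer below
      char *Buf = (char *)malloc(*Size);
      if (Buf && __llvm_profile_write_buffer(Buf) != 0) {
        free(Buf);  // serialization failed
        Buf = NULL;
      }
      return Buf;
    }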
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index fe4418c..6eb6ca8 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -7,6 +7,7 @@
sanitizer_deadlock_detector1.cc
sanitizer_deadlock_detector2.cc
sanitizer_flags.cc
+ sanitizer_flag_parser.cc
sanitizer_libc.cc
sanitizer_libignore.cc
sanitizer_linux.cc
@@ -63,7 +64,10 @@
sanitizer_common_syscalls.inc
sanitizer_deadlock_detector.h
sanitizer_deadlock_detector_interface.h
+ sanitizer_flag_parser.h
sanitizer_flags.h
+ sanitizer_flags.inc
+ sanitizer_interface_internal.h
sanitizer_internal_defs.h
sanitizer_lfstack.h
sanitizer_libc.h
@@ -105,11 +109,10 @@
set(SANITIZER_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_no_rtti_flag(SANITIZER_CFLAGS)
-# Stack frames on PowerPC are much larger than anticipated.
-if(NOT ${LLVM_NATIVE_ARCH} STREQUAL "PowerPC")
- append_list_if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG -Wframe-larger-than=512 SANITIZER_CFLAGS)
-endif()
-append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors SANITIZER_CFLAGS)
+append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=512
+ SANITIZER_CFLAGS)
+append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
+ SANITIZER_CFLAGS)
add_custom_target(sanitizer_common)
set(SANITIZER_RUNTIME_LIBRARIES)
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 47509f8..03b3e83 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -14,7 +14,6 @@
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
namespace __sanitizer {
@@ -61,7 +60,7 @@
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init();
+ internal_allocator_instance->Init(/* may_return_null*/ false);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -140,14 +139,12 @@
return (max / size) < n;
}
-void *AllocatorReturnNull() {
- if (common_flags()->allocator_may_return_null)
- return 0;
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
- return 0;
+ Die();
}
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 2321801..b5105f8 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -23,8 +23,8 @@
namespace __sanitizer {
-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
@@ -211,6 +211,7 @@
void Init() {
internal_memset(this, 0, sizeof(*this));
}
+ void InitLinkerInitialized() {}
void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
@@ -240,11 +241,14 @@
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized() {
next_ = this;
prev_ = this;
}
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
@@ -1002,9 +1006,14 @@
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
+ void InitLinkerInitialized(bool may_return_null) {
page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -1012,7 +1021,9 @@
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
- if (map_size < size) return AllocatorReturnNull(); // Overflow.
+ // Overflow.
+ if (map_size < size)
+ return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_));
@@ -1048,6 +1059,16 @@
return reinterpret_cast<void*>(res);
}
+ void *ReturnNullOrDie() {
+ if (atomic_load(&may_return_null_, memory_order_acquire))
+ return 0;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -1226,6 +1247,7 @@
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
+ atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
@@ -1239,19 +1261,32 @@
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void Init() {
+ void InitCommon(bool may_return_null) {
primary_.Init();
- secondary_.Init();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(bool may_return_null) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null);
+ }
+
+ void Init(bool may_return_null) {
+ secondary_.Init(may_return_null);
stats_.Init();
+ InitCommon(may_return_null);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false) {
+ bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
- return AllocatorReturnNull();
+ return ReturnNullOrDie();
+ if (check_rss_limit && RssLimitIsExceeded())
+ return ReturnNullOrDie();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@@ -1267,6 +1302,30 @@
return res;
}
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDie() {
+ if (MayReturnNull())
+ return 0;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
@@ -1379,6 +1438,8 @@
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
};
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
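With this change the null-vs-die policy is fixed at Init() time (and can be flipped later through SetMayReturnNull()) rather than read from common_flags() inside the allocator, and Allocate() can additionally fail once SetRssLimitIsExceeded(true) has been called. Roughly how a tool runtime is expected to drive the new interface; the Allocator/AllocatorCache typedefs and the flag read are the tool's own, shown here only for illustration:

    // Sketch, assuming the tool's usual CombinedAllocator instantiation.
    static Allocator allocator;  // global, linker-initialized

    void InitializeToolAllocator() {
      // The policy is passed in once; no flag lookup on the failure path.
      allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
    }

    void *ToolAllocate(AllocatorCache *cache, uptr size, uptr alignment) {
      // check_rss_limit=true makes this call return null (or die) after
      // SetRssLimitIsExceeded(true), e.g. from the soft RSS limit callback.
      return allocator.Allocate(cache, size, alignment,
                                /*cleared=*/false, /*check_rss_limit=*/true);
    }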
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index 4409fd6..9b9cfd0 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -49,6 +49,15 @@
void InternalFree(void *p, InternalAllocatorCache *cache = 0);
InternalAllocator *internal_allocator();
+enum InternalAllocEnum {
+ INTERNAL_ALLOC
+};
+
} // namespace __sanitizer
+inline void *operator new(__sanitizer::operator_new_size_type size,
+ InternalAllocEnum) {
+ return InternalAlloc(size);
+}
+
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index c77e50e..4be3c7a 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -12,13 +12,17 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
+atomic_uint32_t current_verbosity;
+
uptr GetPageSizeCached() {
static uptr PageSize;
if (!PageSize)
@@ -26,19 +30,66 @@
return PageSize;
}
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
-// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid|
-// isn't equal to the current PID, try to obtain file descriptor by opening
-// file "report_path_prefix.<PID>".
-fd_t report_fd = kStderrFd;
+void RawWrite(const char *buffer) {
+ report_file.Write(buffer, internal_strlen(buffer));
+}
-// Set via __sanitizer_set_report_path.
-bool log_to_file = false;
-char report_path_prefix[sizeof(report_path_prefix)];
+void ReportFile::ReopenIfNecessary() {
+ mu->CheckLocked();
+ if (fd == kStdoutFd || fd == kStderrFd) return;
-// PID of process that opened |report_fd|. If a fork() occurs, the PID of the
-// child thread will be different from |report_fd_pid|.
-uptr report_fd_pid = 0;
+ uptr pid = internal_getpid();
+ // If in tracer, use the parent's file.
+ if (pid == stoptheworld_tracer_pid)
+ pid = stoptheworld_tracer_ppid;
+ if (fd != kInvalidFd) {
+ // If the report file is already opened by the current process,
+ // do nothing. Otherwise the report file was opened by the parent
+ // process, close it now.
+ if (fd_pid == pid)
+ return;
+ else
+ internal_close(fd);
+ }
+
+ internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+ uptr openrv = OpenFile(full_path, true);
+ if (internal_iserror(openrv)) {
+ const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ internal_write(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ internal_write(kStderrFd, full_path, internal_strlen(full_path));
+ Die();
+ }
+ fd = openrv;
+ fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+ if (!path)
+ return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+
+ SpinMutexLock l(mu);
+ if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+ internal_close(fd);
+ fd = kInvalidFd;
+ if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
+ } else if (internal_strcmp(path, "stderr") == 0) {
+ fd = kStderrFd;
+ } else {
+ internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ }
+}
// PID of the tracer task in StopTheWorld. It shares the address space with the
// main process, but has a different PID and thus requires special handling.
@@ -47,19 +98,23 @@
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;
-static DieCallbackType DieCallback;
+static DieCallbackType InternalDieCallback, UserDieCallback;
void SetDieCallback(DieCallbackType callback) {
- DieCallback = callback;
+ InternalDieCallback = callback;
+}
+void SetUserDieCallback(DieCallbackType callback) {
+ UserDieCallback = callback;
}
DieCallbackType GetDieCallback() {
- return DieCallback;
+ return InternalDieCallback;
}
void NORETURN Die() {
- if (DieCallback) {
- DieCallback();
- }
+ if (UserDieCallback)
+ UserDieCallback();
+ if (InternalDieCallback)
+ InternalDieCallback();
internal__exit(1);
}
@@ -78,8 +133,8 @@
Die();
}
-uptr ReadFileToBuffer(const char *file_name, char **buff,
- uptr *buff_size, uptr max_len) {
+uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr max_len, int *errno_p) {
uptr PageSize = GetPageSizeCached();
uptr kMinFileLen = PageSize;
uptr read_len = 0;
@@ -88,7 +143,7 @@
// The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
uptr openrv = OpenFile(file_name, /*write*/ false);
- if (internal_iserror(openrv)) return 0;
+ if (internal_iserror(openrv, errno_p)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __func__);
@@ -98,6 +153,10 @@
bool reached_eof = false;
while (read_len + PageSize <= size) {
uptr just_read = internal_read(fd, *buff + read_len, PageSize);
+ if (internal_iserror(just_read, errno_p)) {
+ UnmapOrDie(*buff, *buff_size);
+ return 0;
+ }
if (just_read == 0) {
reached_eof = true;
break;
@@ -166,9 +225,8 @@
void ReportErrorSummary(const char *error_message) {
if (!common_flags()->print_summary)
return;
- InternalScopedBuffer<char> buff(kMaxSummaryLength);
- internal_snprintf(buff.data(), buff.size(),
- "SUMMARY: %s: %s", SanitizerToolName, error_message);
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
}
@@ -176,31 +234,39 @@
int line, const char *function) {
if (!common_flags()->print_summary)
return;
- InternalScopedBuffer<char> buff(kMaxSummaryLength);
- internal_snprintf(
- buff.data(), buff.size(), "%s %s:%d %s", error_type,
- file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??",
- line, function ? function : "??");
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("%s %s:%d %s", error_type,
+ file ? StripPathPrefix(file, common_flags()->strip_path_prefix)
+ : "??",
+ line, function ? function : "??");
ReportErrorSummary(buff.data());
}
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
- n_ranges_ = 0;
+ ranges_.clear();
+}
+
+void LoadedModule::clear() {
+ InternalFree(full_name_);
+ while (!ranges_.empty()) {
+ AddressRange *r = ranges_.front();
+ ranges_.pop_front();
+ InternalFree(r);
+ }
}
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
- CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
- ranges_[n_ranges_].beg = beg;
- ranges_[n_ranges_].end = end;
- exec_[n_ranges_] = executable;
- n_ranges_++;
+ void *mem = InternalAlloc(sizeof(AddressRange));
+ AddressRange *r = new(mem) AddressRange(beg, end, executable);
+ ranges_.push_back(r);
}
bool LoadedModule::containsAddress(uptr address) const {
- for (uptr i = 0; i < n_ranges_; i++) {
- if (ranges_[i].beg <= address && address < ranges_[i].end)
+ for (Iterator iter = ranges(); iter.hasNext();) {
+ const AddressRange *r = iter.next();
+ if (r->beg <= address && address < r->end)
return true;
}
return false;
@@ -212,12 +278,9 @@
if (!common_flags()->mmap_limit_mb) return;
uptr total_mmaped =
atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
- if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) {
- // Since for now mmap_limit_mb is not a user-facing flag, just CHECK.
- uptr mmap_limit_mb = common_flags()->mmap_limit_mb;
- common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK.
- RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb);
- }
+ // Since for now mmap_limit_mb is not a user-facing flag, just kill
+  // the program. Use RAW_CHECK to avoid extra mmaps in reporting.
+ RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
}
void DecreaseTotalMmap(uptr size) {
@@ -225,39 +288,63 @@
atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}
+bool TemplateMatch(const char *templ, const char *str) {
+ if (str == 0 || str[0] == 0)
+ return false;
+ bool start = false;
+ if (templ && templ[0] == '^') {
+ start = true;
+ templ++;
+ }
+ bool asterisk = false;
+ while (templ && templ[0]) {
+ if (templ[0] == '*') {
+ templ++;
+ start = false;
+ asterisk = true;
+ continue;
+ }
+ if (templ[0] == '$')
+ return str[0] == 0 || asterisk;
+ if (str[0] == 0)
+ return false;
+ char *tpos = (char*)internal_strchr(templ, '*');
+ char *tpos1 = (char*)internal_strchr(templ, '$');
+ if (tpos == 0 || (tpos1 && tpos1 < tpos))
+ tpos = tpos1;
+ if (tpos != 0)
+ tpos[0] = 0;
+ const char *str0 = str;
+ const char *spos = internal_strstr(str, templ);
+ str = spos + internal_strlen(templ);
+ templ = tpos;
+ if (tpos)
+ tpos[0] = tpos == tpos1 ? '$' : '*';
+ if (spos == 0)
+ return false;
+ if (start && spos != str0)
+ return false;
+ start = false;
+ asterisk = false;
+ }
+ return true;
+}
+
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
extern "C" {
void __sanitizer_set_report_path(const char *path) {
- if (!path)
- return;
- uptr len = internal_strlen(path);
- if (len > sizeof(report_path_prefix) - 100) {
- Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
- path[0], path[1], path[2], path[3],
- path[4], path[5], path[6], path[7]);
- Die();
- }
- if (report_fd != kStdoutFd &&
- report_fd != kStderrFd &&
- report_fd != kInvalidFd)
- internal_close(report_fd);
- report_fd = kInvalidFd;
- log_to_file = false;
- if (internal_strcmp(path, "stdout") == 0) {
- report_fd = kStdoutFd;
- } else if (internal_strcmp(path, "stderr") == 0) {
- report_fd = kStderrFd;
- } else {
- internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix));
- report_path_prefix[len] = '\0';
- log_to_file = true;
- }
+ report_file.SetReportPath(path);
}
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
} // extern "C"
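TemplateMatch() added above is the glob-style matcher shared for suppression matching: '*' matches any substring, a leading '^' anchors the pattern at the start of the string, a trailing '$' anchors it at the end, and a bare pattern behaves like a substring search. A small illustration of the intended semantics (assumes linking against sanitizer_common, as its unit tests do):

    #include <cassert>
    namespace __sanitizer {
    bool TemplateMatch(const char *templ, const char *str);
    }

    int main() {
      using __sanitizer::TemplateMatch;
      assert(TemplateMatch("foo", "barfoobaz"));   // plain substring match
      assert(TemplateMatch("^foo", "foobar"));     // anchored at the start
      assert(!TemplateMatch("^foo", "barfoo"));
      assert(TemplateMatch("foo$", "barfoo"));     // anchored at the end
      assert(TemplateMatch("^a*b$", "a123b"));     // '*' spans "123"
      assert(!TemplateMatch("^a*b$", "a123bc"));
      return 0;
    }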
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index c1e2101..ff13ef1 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// This file is shared between run-time libraries of sanitizers.
+//
// It declares common functions and classes that are used in both runtimes.
// Implementation of some functions are provided in sanitizer_common, while
// others must be defined by run-time library itself.
@@ -16,10 +16,12 @@
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H
+#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
-#include "sanitizer_flags.h"
namespace __sanitizer {
struct StackTrace;
@@ -34,12 +36,20 @@
const uptr kCacheLineSize = 64;
#endif
-const uptr kMaxPathLength = 512;
+const uptr kMaxPathLength = 4096;
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
extern const char *SanitizerToolName; // Can be changed by the tool.
+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+  return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
@@ -66,6 +76,9 @@
void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
+uptr GetRSS();
+void NoHugePagesInRegion(uptr addr, uptr length);
+void DontDumpShadowMemory(uptr addr, uptr length);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@@ -128,29 +141,48 @@
// IO
void RawWrite(const char *buffer);
-bool PrintsToTty();
-// Caching version of PrintsToTty(). Not thread-safe.
-bool PrintsToTtyCached();
bool ColorizeReports();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
- if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
+ if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
} while (0)
#define VPrintf(level, ...) \
do { \
- if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
+ if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
} while (0)
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
-void MaybeOpenReportFile();
-extern fd_t report_fd;
-extern bool log_to_file;
-extern char report_path_prefix[4096];
-extern uptr report_fd_pid;
+
+struct ReportFile {
+ void Write(const char *buffer, uptr length);
+ bool PrintsToTty();
+ void SetReportPath(const char *path);
+
+ // Don't use fields directly. They are only declared public to allow
+ // aggregate initialization.
+
+ // Protects fields below.
+ StaticSpinMutex *mu;
+ // Opened file descriptor. Defaults to stderr. It may be equal to
+ // kInvalidFd, in which case new file will be opened when necessary.
+ fd_t fd;
+ // Path prefix of report file, set via __sanitizer_set_report_path.
+ char path_prefix[kMaxPathLength];
+ // Full path to report, obtained as <path_prefix>.PID
+ char full_path[kMaxPathLength];
+ // PID of the process that opened fd. If a fork() occurs,
+ // the PID of child will be different from fd_pid.
+ uptr fd_pid;
+
+ private:
+ void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;
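The loose report_fd / log_to_file / report_path_prefix globals are folded into this ReportFile object; the user-facing knob is still __sanitizer_set_report_path(), which accepts "stdout", "stderr", or a path prefix to which ".<pid>" is appended on first write (see ReportFile::ReopenIfNecessary() in the sanitizer_common.cc hunk above). A short client-side sketch with an illustrative prefix:

    // Sketch: route sanitizer reports to per-pid files under /tmp.
    extern "C" void __sanitizer_set_report_path(const char *path);

    int main() {
      // Reports go to /tmp/tool_report.<pid>; pass "stderr" to restore the
      // default destination.
      __sanitizer_set_report_path("/tmp/tool_report");
      // ... run the instrumented workload ...
      return 0;
    }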
@@ -159,8 +191,8 @@
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size',
// Returns the number of read bytes or 0 if file can not be opened.
-uptr ReadFileToBuffer(const char *file_name, char **buff,
- uptr *buff_size, uptr max_len);
+uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr max_len, int *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it
// (or NULL if the mapping failes). Stores the size of mmaped region
// in '*buff_size'.
@@ -194,10 +226,13 @@
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());
-void CovUpdateMapping(uptr caller_pc = 0);
+void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);
+void InitializeCoverage(bool enabled, const char *coverage_dir);
+void ReInitializeCoverage(bool enabled, const char *coverage_dir);
+
void InitTlsSize();
uptr GetTlsSize();
@@ -207,6 +242,7 @@
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+bool TemplateMatch(const char *templ, const char *str);
// Exit
void NORETURN Abort();
@@ -225,11 +261,18 @@
// to do tool-specific job.
typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
+void SetUserDieCallback(DieCallbackType);
DieCallbackType GetDieCallback();
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
+// Callback will be called if soft_rss_limit_mb is given and the limit is
+// exceeded (exceeded==true) or if rss went down below the limit
+// (exceeded==false).
+// The callback should be registered once at the tool init time.
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);
@@ -356,14 +399,14 @@
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
-class InternalMmapVector {
+class InternalMmapVectorNoCtor {
public:
- explicit InternalMmapVector(uptr initial_capacity) {
+ void Initialize(uptr initial_capacity) {
capacity_ = Max(initial_capacity, (uptr)1);
size_ = 0;
- data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
+ data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
}
- ~InternalMmapVector() {
+ void Destroy() {
UnmapOrDie(data_, capacity_ * sizeof(T));
}
T &operator[](uptr i) {
@@ -414,15 +457,24 @@
UnmapOrDie(old_data, capacity_ * sizeof(T));
capacity_ = new_capacity;
}
- // Disallow evil constructors.
- InternalMmapVector(const InternalMmapVector&);
- void operator=(const InternalMmapVector&);
T *data_;
uptr capacity_;
uptr size_;
};
+template<typename T>
+class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
+ public:
+ explicit InternalMmapVector(uptr initial_capacity) {
+ InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
+ }
+ ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
+ // Disallow evil constructors.
+ InternalMmapVector(const InternalMmapVector&);
+ void operator=(const InternalMmapVector&);
+};
+
// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
@@ -481,28 +533,30 @@
class LoadedModule {
public:
LoadedModule(const char *module_name, uptr base_address);
+ void clear();
void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
- uptr n_ranges() const { return n_ranges_; }
- uptr address_range_start(int i) const { return ranges_[i].beg; }
- uptr address_range_end(int i) const { return ranges_[i].end; }
- bool address_range_executable(int i) const { return exec_[i]; }
-
- private:
struct AddressRange {
+ AddressRange *next;
uptr beg;
uptr end;
+ bool executable;
+
+ AddressRange(uptr beg, uptr end, bool executable)
+ : next(nullptr), beg(beg), end(end), executable(executable) {}
};
- char *full_name_;
+
+ typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
+ Iterator ranges() const { return Iterator(&ranges_); }
+
+ private:
+ char *full_name_; // Owned.
uptr base_address_;
- static const uptr kMaxNumberOfAddressRanges = 6;
- AddressRange ranges_[kMaxNumberOfAddressRanges];
- bool exec_[kMaxNumberOfAddressRanges];
- uptr n_ranges_;
+ IntrusiveList<AddressRange> ranges_;
};
// OS-dependent function that fills array with descriptions of at most
@@ -535,6 +589,24 @@
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
#endif
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+void MaybeStartBackgroudThread();
+
+// Make the compiler think that something is going on there.
+// Use this inside a loop that looks like memset/memcpy/etc to prevent the
+// compiler from recognising it and turning it into an actual call to
+// memset/memcpy/etc.
+static inline void SanitizerBreakOptimization(void *arg) {
+#if _MSC_VER
+ // FIXME: make sure this is actually enough.
+ __asm;
+#else
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 274e87c..f724115 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -17,10 +17,12 @@
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_INITIALIZE_RANGE
+// COMMON_INTERCEPTOR_DIR_ACQUIRE
// COMMON_INTERCEPTOR_FD_ACQUIRE
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
// COMMON_INTERCEPTOR_SET_THREAD_NAME
+// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
// COMMON_INTERCEPTOR_MUTEX_LOCK
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
@@ -43,6 +45,8 @@
#if SANITIZER_FREEBSD
#define pthread_setname_np pthread_set_name_np
+#define inet_aton __inet_aton
+#define inet_pton __inet_pton
#endif
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -82,7 +86,7 @@
#endif
#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {}
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
#endif
#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
@@ -98,6 +102,10 @@
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
#endif
+#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
+#endif
+
struct FileMetadata {
// For open_memstream().
char **addr;
@@ -915,6 +923,16 @@
va_list ap)
VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+#if SANITIZER_INTERCEPT_PRINTF_L
+INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
+
+INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
+#endif // SANITIZER_INTERCEPT_PRINTF_L
+
INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
@@ -991,6 +1009,14 @@
#define INIT_PRINTF
#endif
+#if SANITIZER_INTERCEPT_PRINTF_L
+#define INIT_PRINTF_L \
+ COMMON_INTERCEPT_FUNCTION(snprintf_l); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
+#else
+#define INIT_PRINTF_L
+#endif
+
#if SANITIZER_INTERCEPT_ISOC99_PRINTF
#define INIT_ISOC99_PRINTF \
COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
@@ -1007,8 +1033,18 @@
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
-INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
+INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
+ // We need a frame pointer, because we call into ioctl_common_[pre|post] which
+ // can trigger a report and we need to be able to unwind through this
+ // function. On Mac in debug mode we might not have a frame pointer, because
+ // ioctl_common_[pre|post] doesn't get inlined here.
+ ENABLE_FRAME_POINTER;
+
void *ctx;
+ va_list ap;
+ va_start(ap, request);
+ void *arg = va_arg(ap, void *);
+ va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
CHECK(ioctl_initialized);
@@ -1017,6 +1053,10 @@
// This effectively disables ioctl handling in TSan.
if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
+ // Although request is unsigned long, the rest of the interceptor uses it
+ // as just "unsigned" to save space, because we know that all values fit in
+ // "unsigned" - they are compile-time constants.
+
const ioctl_desc *desc = ioctl_lookup(request);
ioctl_desc decoded_desc;
if (!desc) {
@@ -2139,6 +2179,16 @@
#endif
#if SANITIZER_INTERCEPT_READDIR
+INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ __sanitizer_dirent *res = REAL(opendir)(path);
+ if (res != 0)
+ COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
+ return res;
+}
+
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
@@ -2167,6 +2217,7 @@
}
#define INIT_READDIR \
+ COMMON_INTERCEPT_FUNCTION(opendir); \
COMMON_INTERCEPT_FUNCTION(readdir); \
COMMON_INTERCEPT_FUNCTION(readdir_r);
#else
@@ -2560,6 +2611,19 @@
#define INIT_SCHED_GETAFFINITY
#endif
+#if SANITIZER_INTERCEPT_SCHED_GETPARAM
+INTERCEPTOR(int, sched_getparam, int pid, void *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
+ int res = REAL(sched_getparam)(pid, param);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
+ return res;
+}
+#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
+#else
+#define INIT_SCHED_GETPARAM
+#endif
+
#if SANITIZER_INTERCEPT_STRERROR
INTERCEPTOR(char *, strerror, int errnum) {
void *ctx;
@@ -3868,6 +3932,12 @@
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+// If you see any crashes around this function, there are 2 known issues with
+// it: 1. __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// 2. It can be called recursively if sanitizer code uses __tls_get_addr
+// to access thread local variables (it should not happen normally,
+// because sanitizers use initial-exec tls model).
INTERCEPTOR(void *, __tls_get_addr, void *arg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
@@ -4629,6 +4699,7 @@
INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
void *res = REAL(dlopen)(filename, flag);
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
return res;
@@ -4762,6 +4833,7 @@
INIT_SCANF;
INIT_ISOC99_SCANF;
INIT_PRINTF;
+ INIT_PRINTF_L;
INIT_ISOC99_PRINTF;
INIT_FREXP;
INIT_FREXPF_FREXPL;
@@ -4812,6 +4884,7 @@
INIT_CANONICALIZE_FILE_NAME;
INIT_CONFSTR;
INIT_SCHED_GETAFFINITY;
+ INIT_SCHED_GETPARAM;
INIT_STRERROR;
INIT_STRERROR_R;
INIT_XPG_STRERROR_R;
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
old mode 100755
new mode 100644
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 4374f56..17ef689 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -13,35 +13,27 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-bool PrintsToTty() {
- MaybeOpenReportFile();
- return internal_isatty(report_fd) != 0;
-}
-
-bool PrintsToTtyCached() {
- // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
- // printing on Windows.
- if (SANITIZER_WINDOWS)
- return 0;
-
- static int cached = 0;
- static bool prints_to_tty;
- if (!cached) { // Not thread-safe.
- prints_to_tty = PrintsToTty();
- cached = 1;
- }
- return prints_to_tty;
+bool ReportFile::PrintsToTty() {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ return internal_isatty(fd) != 0;
}
bool ColorizeReports() {
+ // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
+ // printing on Windows.
+ if (SANITIZER_WINDOWS)
+ return false;
+
const char *flag = common_flags()->color;
return internal_strcmp(flag, "always") == 0 ||
- (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
+ (internal_strcmp(flag, "auto") == 0 && report_file.PrintsToTty());
}
static void (*sandboxing_callback)();
@@ -52,16 +44,85 @@
void ReportErrorSummary(const char *error_type, StackTrace *stack) {
if (!common_flags()->print_summary)
return;
- AddressInfo ai;
#if !SANITIZER_GO
if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) {
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ const AddressInfo &ai = frame->info;
+ ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+ frame->ClearAll();
}
-#endif
+#else
+ AddressInfo ai;
ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+#endif
+}
+
+static void (*SoftRssLimitExceededCallback)(bool exceeded);
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
+ CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
+ SoftRssLimitExceededCallback = Callback;
+}
+
+void BackgroundThread(void *arg) {
+ uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
+ uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ uptr prev_reported_rss = 0;
+ uptr prev_reported_stack_depot_size = 0;
+ bool reached_soft_rss_limit = false;
+ while (true) {
+ SleepForMillis(100);
+ uptr current_rss_mb = GetRSS() >> 20;
+ if (Verbosity()) {
+ // If RSS has grown 10% since last time, print some information.
+ if (prev_reported_rss * 11 / 10 < current_rss_mb) {
+ Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
+ prev_reported_rss = current_rss_mb;
+ }
+ // If stack depot has grown 10% since last time, print it too.
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ if (prev_reported_stack_depot_size * 11 / 10 <
+ stack_depot_stats->allocated) {
+ Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
+ SanitizerToolName,
+ stack_depot_stats->n_uniq_ids,
+ stack_depot_stats->allocated >> 20);
+ prev_reported_stack_depot_size = stack_depot_stats->allocated;
+ }
+ }
+ // Check RSS against the limit.
+ if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
+ Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
+ DumpProcessMap();
+ Die();
+ }
+ if (soft_rss_limit_mb) {
+ if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
+ reached_soft_rss_limit = true;
+ Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(true);
+ } else if (soft_rss_limit_mb >= current_rss_mb &&
+ reached_soft_rss_limit) {
+ reached_soft_rss_limit = false;
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(false);
+ }
+ }
+ }
+}
+
+void MaybeStartBackgroudThread() {
+ if (!SANITIZER_LINUX) return; // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb) return;
+ if (!&real_pthread_create) return; // Can't spawn the thread anyway.
+ internal_start_thread(BackgroundThread, nullptr);
}
} // namespace __sanitizer
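
Below is a minimal sketch of how a tool could hook the RSS-limit machinery added above; the __tool namespace, the allocator hook, and the assumption that the two runtime functions are also declared in sanitizer_common.h are illustrative, not part of this patch.

namespace __sanitizer {
// Declared by this patch (normally pulled in via sanitizer_common.h).
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
void MaybeStartBackgroudThread();
}  // namespace __sanitizer

namespace __tool {

// Hypothetical allocator hook: switch between "crash on OOM" and
// "fail allocations" while the soft limit is exceeded.
static void AllocatorSetMayReturnNull(bool value) { (void)value; }

static void OnSoftRssLimit(bool exceeded) {
  // Called from the background thread when RSS crosses soft_rss_limit_mb
  // in either direction.
  AllocatorSetMayReturnNull(exceeded);
}

void InitRssLimits() {
  __sanitizer::SetSoftRssLimitExceededCallback(OnSoftRssLimit);
  // Spawns the 100ms polling thread only if hard_rss_limit_mb or
  // soft_rss_limit_mb is non-zero, and only on Linux.
  __sanitizer::MaybeStartBackgroudThread();
}

}  // namespace __tool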
diff --git a/lib/sanitizer_common/sanitizer_common_syscalls.inc b/lib/sanitizer_common/sanitizer_common_syscalls.inc
index a52338b..f2c054e 100644
--- a/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -1443,6 +1443,7 @@
POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
+#if SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
if (filename)
PRE_READ(filename,
@@ -1552,6 +1553,7 @@
PRE_SYSCALL(getegid16)() {}
POST_SYSCALL(getegid16)(long res) {}
+#endif // SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(utime)(void *filename, void *times) {}
@@ -2297,7 +2299,8 @@
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2316,7 +2319,8 @@
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index bd98adb..49887b1 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -12,14 +12,16 @@
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
-// if (*Guard) {
-// __sanitizer_cov();
-// *Guard = 1;
+// if (Guard < 0) {
+// __sanitizer_cov(&Guard);
// }
+// At the module start up time __sanitizer_cov_module_init sets the guards
+// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
+// and atomically set Guard to -Guard.
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
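
As a reading aid, here is a self-contained sketch of what the guard scheme described in the comment above boils down to; the block and guard names are invented, and real guards are emitted by the compiler rather than written by hand.

#include <stdint.h>

extern "C" void __sanitizer_cov(uint32_t *guard);  // runtime entry point

// One guard per instrumented basic block. __sanitizer_cov_module_init
// assigns them consecutive negative values (-1, -2, -3, ...) at startup.
static int32_t guard_for_this_block;

void original_block_body();  // placeholder for the user code in the block

void instrumented_block() {
  // Compiler-injected check: only the first execution of the block calls
  // into the runtime; __sanitizer_cov records the PC and flips the guard
  // to -Guard, so later executions skip the call.
  if (guard_for_this_block < 0)
    __sanitizer_cov(reinterpret_cast<uint32_t *>(&guard_for_this_block));
  original_block_body();
}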
@@ -56,23 +58,32 @@
static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
+static bool coverage_enabled = false;
+static const char *coverage_dir;
namespace __sanitizer {
class CoverageData {
public:
void Init();
+ void Enable();
+ void Disable();
+ void ReInit();
void BeforeFork();
void AfterFork(int child_pid);
void Extend(uptr npcs);
- void Add(uptr pc);
+ void Add(uptr pc, u32 *guard);
void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
uptr cache_size);
void DumpCallerCalleePairs();
void DumpTrace();
ALWAYS_INLINE
- void TraceBasicaBlock(uptr *cache);
+ void TraceBasicBlock(s32 *id);
+
+ void InitializeGuardArray(s32 *guards);
+ void InitializeGuards(s32 *guards, uptr n, const char *module_name);
+ void ReinitializeGuards();
uptr *data();
uptr size();
@@ -80,7 +91,7 @@
private:
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+ static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 24, 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@@ -96,45 +107,44 @@
// Descriptor of the file mapped pc array.
int pc_fd;
+ // Vector of coverage guard arrays, protected by mu.
+ InternalMmapVectorNoCtor<s32*> guard_array_vec;
+
+ // Vector of module (compilation unit) names.
+ InternalMmapVectorNoCtor<const char*> comp_unit_name_vec;
+
// Caller-Callee (cc) array, size and current index.
static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
uptr **cc_array;
atomic_uintptr_t cc_array_index;
atomic_uintptr_t cc_array_size;
- // Tracing (tr) pc and event arrays, their size and current index.
+ // Tracing event array, size and current pointer.
// We record all events (basic block entries) in a global buffer of u32
- // values. Each such value is an index in the table of TracedPc objects.
+ // values. Each such value is the index in pc_array.
// So far the tracing is highly experimental:
// - not thread-safe;
// - does not support long traces;
// - not tuned for performance.
- struct TracedPc {
- uptr pc;
- const char *module_name;
- uptr module_offset;
- };
static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
u32 *tr_event_array;
uptr tr_event_array_size;
- uptr tr_event_array_index;
+ u32 *tr_event_pointer;
static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
- TracedPc *tr_pc_array;
- uptr tr_pc_array_size;
- uptr tr_pc_array_index;
StaticSpinMutex mu;
void DirectOpen();
- void ReInit();
};
static CoverageData coverage_data;
+void CovUpdateMapping(const char *path, uptr caller_pc = 0);
+
void CoverageData::DirectOpen() {
- InternalScopedString path(1024);
+ InternalScopedString path(kMaxPathLength);
internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
- common_flags()->coverage_dir, internal_getpid());
+ coverage_dir, internal_getpid());
pc_fd = OpenFile(path.data(), true);
if (internal_iserror(pc_fd)) {
Report(" Coverage: failed to open %s for writing\n", path.data());
@@ -142,19 +152,23 @@
}
pc_array_mapped_size = 0;
- CovUpdateMapping();
+ CovUpdateMapping(coverage_dir);
}
void CoverageData::Init() {
+ pc_fd = kInvalidFd;
+}
+
+void CoverageData::Enable() {
+ if (pc_array)
+ return;
pc_array = reinterpret_cast<uptr *>(
MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
- pc_fd = kInvalidFd;
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
if (common_flags()->coverage_direct) {
atomic_store(&pc_array_size, 0, memory_order_relaxed);
- atomic_store(&pc_array_index, 0, memory_order_relaxed);
} else {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
- atomic_store(&pc_array_index, 0, memory_order_relaxed);
}
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
@@ -162,30 +176,72 @@
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
atomic_store(&cc_array_index, 0, memory_order_relaxed);
- tr_event_array = reinterpret_cast<u32 *>(
- MmapNoReserveOrDie(sizeof(tr_event_array[0]) * kTrEventArrayMaxSize,
- "CovInit::tr_event_array"));
+ // Allocate tr_event_array with a guard page at the end.
+ tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
+ sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
+ "CovInit::tr_event_array"));
+ Mprotect(reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
+ GetMmapGranularity());
tr_event_array_size = kTrEventArrayMaxSize;
- tr_event_array_index = 0;
+ tr_event_pointer = tr_event_array;
+}
- tr_pc_array = reinterpret_cast<TracedPc *>(MmapNoReserveOrDie(
- sizeof(tr_pc_array[0]) * kTrEventArrayMaxSize, "CovInit::tr_pc_array"));
- tr_pc_array_size = kTrEventArrayMaxSize;
- tr_pc_array_index = 0;
+void CoverageData::InitializeGuardArray(s32 *guards) {
+ Enable(); // Make sure coverage is enabled at this point.
+ s32 n = guards[0];
+ for (s32 j = 1; j <= n; j++) {
+ uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+ guards[j] = -static_cast<s32>(idx + 1);
+ }
+}
+
+void CoverageData::Disable() {
+ if (pc_array) {
+ internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
+ pc_array = nullptr;
+ }
+ if (cc_array) {
+ internal_munmap(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
+ cc_array = nullptr;
+ }
+ if (tr_event_array) {
+ internal_munmap(tr_event_array,
+ sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
+ GetMmapGranularity());
+ tr_event_array = nullptr;
+ tr_event_pointer = nullptr;
+ }
+ if (pc_fd != kInvalidFd) {
+ internal_close(pc_fd);
+ pc_fd = kInvalidFd;
+ }
+}
+
+void CoverageData::ReinitializeGuards() {
+ // Assuming single thread.
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ for (uptr i = 0; i < guard_array_vec.size(); i++)
+ InitializeGuardArray(guard_array_vec[i]);
}
void CoverageData::ReInit() {
- internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
- if (pc_fd != kInvalidFd) internal_close(pc_fd);
- if (common_flags()->coverage_direct) {
- // In memory-mapped mode we must extend the new file to the known array
- // size.
- uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
- Init();
- if (size) Extend(size);
- } else {
- Init();
+ Disable();
+ if (coverage_enabled) {
+ if (common_flags()->coverage_direct) {
+ // In memory-mapped mode we must extend the new file to the known array
+ // size.
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ Enable();
+ if (size) Extend(size);
+ if (coverage_enabled) CovUpdateMapping(coverage_dir);
+ } else {
+ Enable();
+ }
}
+ // Re-initialize the guards.
+ // We are single-threaded now, no need to grab any lock.
+ CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
+ ReinitializeGuards();
}
void CoverageData::BeforeFork() {
@@ -203,15 +259,16 @@
if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mu);
- if (pc_fd == kInvalidFd) DirectOpen();
- CHECK_NE(pc_fd, kInvalidFd);
-
uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
size += npcs * sizeof(uptr);
- if (size > pc_array_mapped_size) {
+ if (coverage_enabled && size > pc_array_mapped_size) {
+ if (pc_fd == kInvalidFd) DirectOpen();
+ CHECK_NE(pc_fd, kInvalidFd);
+
uptr new_mapped_size = pc_array_mapped_size;
while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
+ CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
// Extend the file and map the new space at the end of pc_array.
uptr res = internal_ftruncate(pc_fd, new_mapped_size);
@@ -220,21 +277,43 @@
Printf("failed to extend raw coverage file: %d\n", err);
Die();
}
- void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
+
+ uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
+ void *p = MapWritableFileToMemory((void *)next_map_base,
new_mapped_size - pc_array_mapped_size,
pc_fd, pc_array_mapped_size);
- CHECK_EQ(p, pc_array + pc_array_mapped_size);
+ CHECK_EQ((uptr)p, next_map_base);
pc_array_mapped_size = new_mapped_size;
}
atomic_store(&pc_array_size, size, memory_order_release);
}
-// Simply add the pc into the vector under lock. If the function is called more
-// than once for a given PC it will be inserted multiple times, which is fine.
-void CoverageData::Add(uptr pc) {
+void CoverageData::InitializeGuards(s32 *guards, uptr n,
+ const char *module_name) {
+ // The array 'guards' has n+1 elements, we use the element zero
+ // to store 'n'.
+ CHECK_LT(n, 1 << 30);
+ guards[0] = static_cast<s32>(n);
+ InitializeGuardArray(guards);
+ SpinMutexLock l(&mu);
+ comp_unit_name_vec.push_back(module_name);
+ guard_array_vec.push_back(guards);
+}
+
+// If guard is negative, atomically set it to -guard and store the PC in
+// pc_array.
+void CoverageData::Add(uptr pc, u32 *guard) {
+ atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+ s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
+ if (guard_value >= 0) return;
+
+ atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
if (!pc_array) return;
- uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+
+ uptr idx = -guard_value - 1;
+ if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
+ return; // May happen after fork when pc_array_index becomes 0.
CHECK_LT(idx * sizeof(uptr),
atomic_load(&pc_array_size, memory_order_acquire));
pc_array[idx] = pc;
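
A small worked example of the guard-to-slot arithmetic used in Add(); GuardToPcArraySlot is a name introduced here purely for illustration.

#include <stdint.h>

// InitializeGuardArray hands out guards -1, -2, -3, ...; Add() recovers the
// pc_array slot as idx = -guard_value - 1.
constexpr uintptr_t GuardToPcArraySlot(int32_t guard_value) {
  return static_cast<uintptr_t>(-guard_value) - 1;
}

static_assert(GuardToPcArraySlot(-1) == 0, "first block lands in slot 0");
static_assert(GuardToPcArraySlot(-7) == 6, "guard -7 lands in slot 6");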
@@ -334,20 +413,19 @@
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
-static int CovOpenFile(bool packed, const char* name) {
- InternalScopedBuffer<char> path(1024);
+static int CovOpenFile(bool packed, const char *name,
+ const char *extension = "sancov") {
+ InternalScopedString path(kMaxPathLength);
if (!packed) {
CHECK(name);
- internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
- common_flags()->coverage_dir, name, internal_getpid());
+ path.append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
+ extension);
} else {
if (!name)
- internal_snprintf((char *)path.data(), path.size(),
- "%s/%zd.sancov.packed", common_flags()->coverage_dir,
- internal_getpid());
+ path.append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
+ extension);
else
- internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed",
- common_flags()->coverage_dir, name);
+ path.append("%s/%s.%s.packed", coverage_dir, name, extension);
}
uptr fd = OpenFile(path.data(), true);
if (internal_iserror(fd)) {
@@ -359,35 +437,49 @@
// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
- uptr max_idx = tr_event_array_index;
+ uptr max_idx = tr_event_pointer - tr_event_array;
if (!max_idx) return;
auto sym = Symbolizer::GetOrInit();
if (!sym)
return;
InternalScopedString out(32 << 20);
- for (uptr i = 0; i < max_idx; i++) {
- u32 pc_idx = tr_event_array[i];
- TracedPc *t = &tr_pc_array[pc_idx];
- if (!t->module_name) {
- const char *module_name = "<unknown>";
- uptr module_address = 0;
- sym->GetModuleNameAndOffsetForPC(t->pc, &module_name, &module_address);
- t->module_name = internal_strdup(module_name);
- t->module_offset = module_address;
- out.append("%s 0x%zx\n", t->module_name, t->module_offset);
- }
+ for (uptr i = 0, n = size(); i < n; i++) {
+ const char *module_name = "<unknown>";
+ uptr module_address = 0;
+ sym->GetModuleNameAndOffsetForPC(pc_array[i], &module_name,
+ &module_address);
+ out.append("%s 0x%zx\n", module_name, module_address);
}
int fd = CovOpenFile(false, "trace-points");
if (fd < 0) return;
internal_write(fd, out.data(), out.length());
internal_close(fd);
+ fd = CovOpenFile(false, "trace-compunits");
+ if (fd < 0) return;
+ out.clear();
+ for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
+ out.append("%s\n", comp_unit_name_vec[i]);
+ internal_write(fd, out.data(), out.length());
+ internal_close(fd);
+
fd = CovOpenFile(false, "trace-events");
if (fd < 0) return;
- internal_write(fd, tr_event_array, max_idx * sizeof(tr_event_array[0]));
+ uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
+ u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
+ // The trace file could be huge, and may not be written with a single syscall.
+ while (bytes_to_write) {
+ uptr actually_written = internal_write(fd, event_bytes, bytes_to_write);
+ if (actually_written <= bytes_to_write) {
+ bytes_to_write -= actually_written;
+ event_bytes += actually_written;
+ } else {
+ break;
+ }
+ }
internal_close(fd);
- VReport(1, " CovDump: Trace: %zd PCs written\n", tr_pc_array_index);
- VReport(1, " CovDump: Trace: %zd Events written\n", tr_event_array_index);
+ VReport(1, " CovDump: Trace: %zd PCs written\n", size());
+ VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}
// This function dumps the caller=>callee pairs into a file as a sequence of
@@ -432,28 +524,45 @@
// Record the current PC into the event buffer.
// Every event is a u32 value (index in tr_pc_array_index) so we compute
// it once and then cache in the provided 'cache' storage.
-void CoverageData::TraceBasicaBlock(uptr *cache) {
- CHECK(common_flags()->coverage);
- uptr idx = *cache;
- if (!idx) {
- CHECK_LT(tr_pc_array_index, kTrPcArrayMaxSize);
- idx = tr_pc_array_index++;
- TracedPc *t = &tr_pc_array[idx];
- t->pc = GET_CALLER_PC();
- *cache = idx;
- CHECK_LT(idx, 1U << 31);
+//
+// This function will eventually be inlined by the compiler.
+void CoverageData::TraceBasicBlock(s32 *id) {
+ // Will trap here if
+ // 1. coverage is not enabled at run-time.
+ // 2. The array tr_event_array is full.
+ *tr_event_pointer = static_cast<u32>(*id - 1);
+ tr_event_pointer++;
+}
+
+static void CovDumpAsBitSet() {
+ if (!common_flags()->coverage_bitset) return;
+ if (!coverage_data.size()) return;
+ int fd = CovOpenFile(/* packed */false, "combined", "bitset-sancov");
+ if (fd < 0) return;
+ uptr n = coverage_data.size();
+ uptr n_set_bits = 0;
+ InternalScopedBuffer<char> out(n);
+ for (uptr i = 0; i < n; i++) {
+ uptr pc = coverage_data.data()[i];
+ out[i] = pc ? '1' : '0';
+ if (pc)
+ n_set_bits++;
}
- CHECK_LT(tr_event_array_index, tr_event_array_size);
- tr_event_array[tr_event_array_index] = static_cast<u32>(idx);
- tr_event_array_index++;
+ internal_write(fd, out.data(), n);
+ internal_close(fd);
+ VReport(1, " CovDump: bitset of %zd bits written, %zd bits are set\n", n,
+ n_set_bits);
}
// Dump the coverage on disk.
static void CovDump() {
- if (!common_flags()->coverage || common_flags()->coverage_direct) return;
+ if (!coverage_enabled || common_flags()->coverage_direct) return;
#if !SANITIZER_WINDOWS
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
return;
+ CovDumpAsBitSet();
+ coverage_data.DumpTrace();
+ if (!common_flags()->coverage_pcs) return;
uptr size = coverage_data.size();
InternalMmapVector<u32> offsets(size);
uptr *vb = coverage_data.data();
@@ -461,8 +570,8 @@
SortArray(vb, size);
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr mb, me, off, prot;
- InternalScopedBuffer<char> module(4096);
- InternalScopedBuffer<char> path(4096 * 2);
+ InternalScopedString module(kMaxPathLength);
+ InternalScopedString path(kMaxPathLength);
for (int i = 0;
proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
i++) {
@@ -488,9 +597,9 @@
}
} else {
// One file per module per process.
- internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
- common_flags()->coverage_dir, module_name,
- internal_getpid());
+ path.clear();
+ path.append("%s/%s.%zd.sancov", coverage_dir, module_name,
+ internal_getpid());
int fd = CovOpenFile(false /* packed */, module_name);
if (fd > 0) {
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
@@ -504,13 +613,12 @@
if (cov_fd >= 0)
internal_close(cov_fd);
coverage_data.DumpCallerCalleePairs();
- coverage_data.DumpTrace();
#endif // !SANITIZER_WINDOWS
}
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
if (!args) return;
- if (!common_flags()->coverage) return;
+ if (!coverage_enabled) return;
cov_sandboxed = args->coverage_sandboxed;
if (!cov_sandboxed) return;
cov_fd = args->coverage_fd;
@@ -522,7 +630,7 @@
int MaybeOpenCovFile(const char *name) {
CHECK(name);
- if (!common_flags()->coverage) return -1;
+ if (!coverage_enabled) return -1;
return CovOpenFile(true /* packed */, name);
}
@@ -534,27 +642,60 @@
coverage_data.AfterFork(child_pid);
}
+void InitializeCoverage(bool enabled, const char *dir) {
+ if (coverage_enabled)
+ return; // May happen if two sanitizers enable coverage in the same process.
+ coverage_enabled = enabled;
+ coverage_dir = dir;
+ coverage_data.Init();
+ if (enabled) coverage_data.Enable();
+#if !SANITIZER_WINDOWS
+ if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
+#endif
+}
+
+void ReInitializeCoverage(bool enabled, const char *dir) {
+ coverage_enabled = enabled;
+ coverage_dir = dir;
+ coverage_data.ReInit();
+}
+
+void CoverageUpdateMapping() {
+ if (coverage_enabled)
+ CovUpdateMapping(coverage_dir);
+}
+
} // namespace __sanitizer
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
- coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
+ coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
+ guard);
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
+ atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
+ if (__sanitizer::atomic_load(atomic_guard, memory_order_relaxed))
+ __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
callee, callee_cache16, 16);
}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
+ coverage_enabled = true;
+ coverage_dir = common_flags()->coverage_dir;
coverage_data.Init();
}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
- if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
- if (SANITIZER_ANDROID) {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
+SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_cov_module_init(s32 *guards, uptr npcs, const char *module_name) {
+ coverage_data.InitializeGuards(guards, npcs, module_name);
+ if (!common_flags()->coverage_direct) return;
+ if (SANITIZER_ANDROID && coverage_enabled) {
// dlopen/dlclose interceptors do not work on Android, so we rely on
// Extend() calls to update .sancov.map.
- CovUpdateMapping(GET_CALLER_PC());
+ CovUpdateMapping(coverage_dir, GET_CALLER_PC());
}
coverage_data.Extend(npcs);
}
@@ -568,11 +709,23 @@
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_func_enter(uptr *cache) {
- coverage_data.TraceBasicaBlock(cache);
+void __sanitizer_cov_trace_func_enter(s32 *id) {
+ coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_basic_block(uptr *cache) {
- coverage_data.TraceBasicaBlock(cache);
+void __sanitizer_cov_trace_basic_block(s32 *id) {
+ coverage_data.TraceBasicBlock(id);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_reset_coverage() {
+ coverage_data.ReinitializeGuards();
+ internal_bzero_aligned16(
+ coverage_data.data(),
+ RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_coverage_guards(uptr **data) {
+ *data = coverage_data.data();
+ return coverage_data.size();
}
} // extern "C"
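
A sketch of how a coverage-driven harness might use the interface functions exported above; the declarations are restated with uintptr_t standing in for __sanitizer::uptr, and RunOneInput is a hypothetical callback.

#include <stdint.h>
#include <stdio.h>

extern "C" {
// Restated from the runtime interface added in this patch.
uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
void __sanitizer_reset_coverage();
void __sanitizer_cov_dump();
}

void RunOneInput();  // hypothetical: runs the instrumented code once

void CoverageDrivenLoop(int iterations) {
  for (int i = 0; i < iterations; i++) {
    __sanitizer_reset_coverage();  // re-arm guards, zero pc_array
    RunOneInput();
    uintptr_t *pcs;
    uintptr_t n = __sanitizer_get_coverage_guards(&pcs);
    uintptr_t hit = 0;
    for (uintptr_t j = 0; j < n; j++)
      if (pcs[j]) hit++;  // a non-zero slot means that block executed
    printf("run %d: %zu/%zu blocks covered\n", i, (size_t)hit, (size_t)n);
  }
  __sanitizer_cov_dump();  // write *.sancov files for the final run
}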
diff --git a/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
index dddf2f0..6b5e91f 100644
--- a/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
@@ -62,8 +62,8 @@
static CachedMapping cached_mapping;
static StaticSpinMutex mapping_mu;
-void CovUpdateMapping(uptr caller_pc) {
- if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
+ if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mapping_mu);
@@ -71,38 +71,41 @@
return;
InternalScopedString text(kMaxTextSize);
- InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
- sizeof(LoadedModule));
- LoadedModule *modules = (LoadedModule *)modules_data.data();
- CHECK(modules);
- int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
- /* filter */ 0);
- text.append("%d\n", sizeof(uptr) * 8);
- for (int i = 0; i < n_modules; ++i) {
- const char *module_name = StripModuleName(modules[i].full_name());
- for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
- if (modules[i].address_range_executable(j)) {
- uptr start = modules[i].address_range_start(j);
- uptr end = modules[i].address_range_end(j);
- uptr base = modules[i].base_address();
- text.append("%zx %zx %zx %s\n", start, end, base, module_name);
- if (caller_pc && caller_pc >= start && caller_pc < end)
- cached_mapping.SetModuleRange(start, end);
+ {
+ InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
+ CHECK(modules.data());
+ int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
+ /* filter */ 0);
+
+ text.append("%d\n", sizeof(uptr) * 8);
+ for (int i = 0; i < n_modules; ++i) {
+ const char *module_name = StripModuleName(modules[i].full_name());
+ uptr base = modules[i].base_address();
+ for (auto iter = modules[i].ranges(); iter.hasNext();) {
+ const auto *range = iter.next();
+ if (range->executable) {
+ uptr start = range->beg;
+ uptr end = range->end;
+ text.append("%zx %zx %zx %s\n", start, end, base, module_name);
+ if (caller_pc && caller_pc >= start && caller_pc < end)
+ cached_mapping.SetModuleRange(start, end);
+ }
}
+ modules[i].clear();
}
}
int err;
- InternalScopedString tmp_path(64 +
- internal_strlen(common_flags()->coverage_dir));
+ InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
- "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
- internal_getpid());
+ "%s/%zd.sancov.map.tmp", coverage_dir,
+ internal_getpid());
CHECK_LE(res, tmp_path.size());
uptr map_fd = OpenFile(tmp_path.data(), true);
- if (internal_iserror(map_fd)) {
- Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
+ if (internal_iserror(map_fd, &err)) {
+ Report(" Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
+ err);
Die();
}
@@ -113,9 +116,9 @@
}
internal_close(map_fd);
- InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
+ InternalScopedString path(64 + internal_strlen(coverage_dir));
res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
- common_flags()->coverage_dir, internal_getpid());
+ coverage_dir, internal_getpid());
CHECK_LE(res, path.size());
res = internal_rename(tmp_path.data(), path.data());
if (internal_iserror(res, &err)) {
diff --git a/lib/sanitizer_common/sanitizer_deadlock_detector.h b/lib/sanitizer_common/sanitizer_deadlock_detector.h
index 90e1cc4..86d5743 100644
--- a/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -50,6 +50,8 @@
if (epoch_ == current_epoch) return;
bv_.clear();
epoch_ = current_epoch;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
}
uptr getEpoch() const { return epoch_; }
@@ -83,7 +85,8 @@
}
}
// Printf("remLock: %zx %zx\n", lock_id, epoch_);
- CHECK(bv_.clearBit(lock_id));
+ if (!bv_.clearBit(lock_id))
+ return; // probably addLock happened before flush
if (n_all_locks_) {
for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
@@ -175,6 +178,7 @@
recycled_nodes_.clear();
available_nodes_.setAll();
g_.clear();
+ n_edges_ = 0;
return getAvailableNode(data);
}
diff --git a/lib/sanitizer_common/sanitizer_flag_parser.cc b/lib/sanitizer_common/sanitizer_flag_parser.cc
new file mode 100644
index 0000000..d125002
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flag_parser.cc
@@ -0,0 +1,153 @@
+//===-- sanitizer_flag_parser.cc ------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flag_parser.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+LowLevelAllocator FlagParser::Alloc;
+
+class UnknownFlags {
+ static const int kMaxUnknownFlags = 20;
+ const char *unknown_flags_[kMaxUnknownFlags];
+ int n_unknown_flags_;
+
+ public:
+ void Add(const char *name) {
+ CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
+ unknown_flags_[n_unknown_flags_++] = name;
+ }
+
+ void Report() {
+ if (!n_unknown_flags_) return;
+ Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
+ for (int i = 0; i < n_unknown_flags_; ++i)
+ Printf(" %s\n", unknown_flags_[i]);
+ n_unknown_flags_ = 0;
+ }
+};
+
+UnknownFlags unknown_flags;
+
+void ReportUnrecognizedFlags() {
+ unknown_flags.Report();
+}
+
+char *FlagParser::ll_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)Alloc.Allocate(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+void FlagParser::PrintFlagDescriptions() {
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ for (int i = 0; i < n_flags_; ++i)
+ Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
+}
+
+void FlagParser::fatal_error(const char *err) {
+ Printf("ERROR: %s\n", err);
+ Die();
+}
+
+bool FlagParser::is_space(char c) {
+ return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
+ c == '\r';
+}
+
+void FlagParser::skip_whitespace() {
+ while (is_space(buf_[pos_])) ++pos_;
+}
+
+void FlagParser::parse_flag() {
+ uptr name_start = pos_;
+ while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != '=') fatal_error("expected '='");
+ char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
+
+ uptr value_start = ++pos_;
+ char *value;
+ if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
+ char quote = buf_[pos_++];
+ while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
+ if (buf_[pos_] == 0) fatal_error("unterminated string");
+ value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
+ ++pos_; // consume the closing quote
+ } else {
+ while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
+ fatal_error("expected separator or eol");
+ value = ll_strndup(buf_ + value_start, pos_ - value_start);
+ }
+
+ bool res = run_handler(name, value);
+ if (!res) fatal_error("Flag parsing failed.");
+}
+
+void FlagParser::parse_flags() {
+ while (true) {
+ skip_whitespace();
+ if (buf_[pos_] == 0) break;
+ parse_flag();
+ }
+
+ // Do a sanity check for certain flags.
+ if (common_flags_dont_use.malloc_context_size < 1)
+ common_flags_dont_use.malloc_context_size = 1;
+}
+
+void FlagParser::ParseString(const char *s) {
+ if (!s) return;
+ // Backup current parser state to allow nested ParseString() calls.
+ const char *old_buf_ = buf_;
+ uptr old_pos_ = pos_;
+ buf_ = s;
+ pos_ = 0;
+
+ parse_flags();
+
+ buf_ = old_buf_;
+ pos_ = old_pos_;
+}
+
+bool FlagParser::run_handler(const char *name, const char *value) {
+ for (int i = 0; i < n_flags_; ++i) {
+ if (internal_strcmp(name, flags_[i].name) == 0)
+ return flags_[i].handler->Parse(value);
+ }
+ // Unrecognized flag. This is not a fatal error, we may print a warning later.
+ unknown_flags.Add(name);
+ return true;
+}
+
+void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc) {
+ CHECK_LT(n_flags_, kMaxFlags);
+ flags_[n_flags_].name = name;
+ flags_[n_flags_].desc = desc;
+ flags_[n_flags_].handler = handler;
+ ++n_flags_;
+}
+
+FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
+ flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+}
+
+} // namespace __sanitizer
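
For context, a sketch of how a runtime is expected to drive the parser; the environment variable name is illustrative and the include paths assume the usual in-tree layout.

#include "sanitizer_common.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"

namespace __tool {

void InitializeCommonFlagsFromEnv() {
  using namespace __sanitizer;
  SetCommonFlagsDefaults();

  FlagParser parser;
  RegisterCommonFlags(&parser);

  // parse_flag() accepts name=value pairs separated by spaces, commas,
  // colons, tabs or newlines; values may be single- or double-quoted, and
  // "include=<file>" pulls additional options from a file, e.g.:
  //   verbosity=1:color=always include='/tmp/extra.opts'
  parser.ParseString(GetEnv("TOOL_OPTIONS"));  // hypothetical variable name

  if (common_flags()->help) parser.PrintFlagDescriptions();
  ReportUnrecognizedFlags();
}

}  // namespace __tool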
diff --git a/lib/sanitizer_common/sanitizer_flag_parser.h b/lib/sanitizer_common/sanitizer_flag_parser.h
new file mode 100644
index 0000000..0ac7634
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -0,0 +1,121 @@
+//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAG_REGISTRY_H
+#define SANITIZER_FLAG_REGISTRY_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class FlagHandlerBase {
+ public:
+ virtual bool Parse(const char *value) { return false; }
+};
+
+template <typename T>
+class FlagHandler : public FlagHandlerBase {
+ T *t_;
+
+ public:
+ explicit FlagHandler(T *t) : t_(t) {}
+ bool Parse(const char *value) final;
+};
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+ if (internal_strcmp(value, "0") == 0 ||
+ internal_strcmp(value, "no") == 0 ||
+ internal_strcmp(value, "false") == 0) {
+ *t_ = false;
+ return true;
+ }
+ if (internal_strcmp(value, "1") == 0 ||
+ internal_strcmp(value, "yes") == 0 ||
+ internal_strcmp(value, "true") == 0) {
+ *t_ = true;
+ return true;
+ }
+ Printf("ERROR: Invalid value for bool option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Parse(const char *value) {
+ *t_ = internal_strdup(value);
+ return true;
+}
+
+template <>
+inline bool FlagHandler<int>::Parse(const char *value) {
+ char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Parse(const char *value) {
+ char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
+ return ok;
+}
+
+class FlagParser {
+ static const int kMaxFlags = 200;
+ struct Flag {
+ const char *name;
+ const char *desc;
+ FlagHandlerBase *handler;
+ } *flags_;
+ int n_flags_;
+
+ const char *buf_;
+ uptr pos_;
+
+ public:
+ FlagParser();
+ void RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc);
+ void ParseString(const char *s);
+ void PrintFlagDescriptions();
+
+ static LowLevelAllocator Alloc;
+
+ private:
+ void fatal_error(const char *err);
+ bool is_space(char c);
+ void skip_whitespace();
+ void parse_flags();
+ void parse_flag();
+ bool run_handler(const char *name, const char *value);
+ char *ll_strndup(const char *s, uptr n);
+};
+
+template <typename T>
+static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
+ T *var) {
+ FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
+ parser->RegisterHandler(name, fh, desc);
+}
+
+void ReportUnrecognizedFlags();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAG_REGISTRY_H
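
A sketch of registering a tool-specific flag through the same mechanism; ToolFlags and its members are invented for the example, only FlagParser, FlagHandler and RegisterFlag come from this header.

#include "sanitizer_flag_parser.h"

namespace __tool {

struct ToolFlags {
  int quarantine_size_mb;  // hypothetical tool flag
  bool poison_heap;        // hypothetical tool flag
};

static ToolFlags tool_flags = {256, true};

void RegisterToolFlags(__sanitizer::FlagParser *parser) {
  using __sanitizer::RegisterFlag;
  // RegisterFlag instantiates FlagHandler<int> / FlagHandler<bool> and wires
  // the parsed value straight into the given variable.
  RegisterFlag(parser, "quarantine_size_mb",
               "Size (in Mb) of the quarantine for delayed reuse of freed "
               "memory.",
               &tool_flags.quarantine_size_mb);
  RegisterFlag(parser, "poison_heap", "Poison heap memory on [de]allocation.",
               &tool_flags.poison_heap);
}

}  // namespace __tool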
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index 40b6ec0..e835b46 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -16,6 +16,7 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
+#include "sanitizer_flag_parser.h"
namespace __sanitizer {
@@ -34,274 +35,53 @@
# define SANITIZER_NEEDS_SEGV 1
#endif
-void SetCommonFlagsDefaults(CommonFlags *f) {
- f->symbolize = true;
- f->external_symbolizer_path = 0;
- f->allow_addr2line = false;
- f->strip_path_prefix = "";
- f->fast_unwind_on_check = false;
- f->fast_unwind_on_fatal = false;
- f->fast_unwind_on_malloc = true;
- f->handle_ioctl = false;
- f->malloc_context_size = 1;
- f->log_path = "stderr";
- f->verbosity = 0;
- f->detect_leaks = true;
- f->leak_check_at_exit = true;
- f->allocator_may_return_null = false;
- f->print_summary = true;
- f->check_printf = true;
- // TODO(glider): tools may want to set different defaults for handle_segv.
- f->handle_segv = SANITIZER_NEEDS_SEGV;
- f->allow_user_segv_handler = false;
- f->use_sigaltstack = true;
- f->detect_deadlocks = false;
- f->clear_shadow_mmap_threshold = 64 * 1024;
- f->color = "auto";
- f->legacy_pthread_cond = false;
- f->intercept_tls_get_addr = false;
- f->coverage = false;
- f->coverage_direct = SANITIZER_ANDROID;
- f->coverage_dir = ".";
- f->full_address_space = false;
- f->suppressions = "";
- f->print_suppressions = true;
- f->disable_coredump = (SANITIZER_WORDSIZE == 64);
- f->symbolize_inline_frames = true;
- f->stack_trace_format = "DEFAULT";
+void CommonFlags::SetDefaults() {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
}
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
- ParseFlag(str, &f->symbolize, "symbolize",
- "If set, use the online symbolizer from common sanitizer runtime to turn "
- "virtual addresses to file/line locations.");
- ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
- "Path to external symbolizer. If empty, the tool will search $PATH for "
- "the symbolizer.");
- ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
- "If set, allows online symbolizer to run addr2line binary to symbolize "
- "stack traces (addr2line will only be used if llvm-symbolizer binary is "
- "unavailable.");
- ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
- "Strips this prefix from file paths in error reports.");
- ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
- "If available, use the fast frame-pointer-based unwinder on "
- "internal CHECK failures.");
- ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
- "If available, use the fast frame-pointer-based unwinder on fatal "
- "errors.");
- ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
- "If available, use the fast frame-pointer-based unwinder on "
- "malloc/free.");
- ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
- "Intercept and handle ioctl requests.");
- ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
- "Max number of stack frames kept for each allocation/deallocation.");
- ParseFlag(str, &f->log_path, "log_path",
- "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
- "\"stderr\". The default is \"stderr\".");
- ParseFlag(str, &f->verbosity, "verbosity",
- "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
- ParseFlag(str, &f->detect_leaks, "detect_leaks",
- "Enable memory leak detection.");
- ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
- "Invoke leak checking in an atexit handler. Has no effect if "
- "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
- "handler has a chance to run.");
- ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
- "If false, the allocator will crash instead of returning 0 on "
- "out-of-memory.");
- ParseFlag(str, &f->print_summary, "print_summary",
- "If false, disable printing error summaries in addition to error "
- "reports.");
- ParseFlag(str, &f->check_printf, "check_printf",
- "Check printf arguments.");
- ParseFlag(str, &f->handle_segv, "handle_segv",
- "If set, registers the tool's custom SEGV handler (both SIGBUS and "
- "SIGSEGV on OSX).");
- ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
- "If set, allows user to register a SEGV handler even if the tool "
- "registers one.");
- ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
- "If set, uses alternate stack for signal handling.");
- ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
- "If set, deadlock detection is enabled.");
- ParseFlag(str, &f->clear_shadow_mmap_threshold,
- "clear_shadow_mmap_threshold",
- "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
- "memset(). This is the threshold size in bytes.");
- ParseFlag(str, &f->color, "color",
- "Colorize reports: (always|never|auto).");
- ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
- "Enables support for dynamic libraries linked with libpthread 2.2.5.");
- ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
- "Intercept __tls_get_addr.");
- ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
- ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
- "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
- "not a user-facing flag, used mosly for testing the tools");
- ParseFlag(str, &f->coverage, "coverage",
- "If set, coverage information will be dumped at program shutdown (if the "
- "coverage instrumentation was enabled at compile time).");
- ParseFlag(str, &f->coverage_direct, "coverage_direct",
- "If set, coverage information will be dumped directly to a memory "
- "mapped file. This way data is not lost even if the process is "
- "suddenly killed.");
- ParseFlag(str, &f->coverage_dir, "coverage_dir",
- "Target directory for coverage dumps. Defaults to the current "
- "directory.");
- ParseFlag(str, &f->full_address_space, "full_address_space",
- "Sanitize complete address space; "
- "by default kernel area on 32-bit platforms will not be sanitized");
- ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
- ParseFlag(str, &f->print_suppressions, "print_suppressions",
- "Print matched suppressions at exit.");
- ParseFlag(str, &f->disable_coredump, "disable_coredump",
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
- "default and for sanitizers that don't reserve lots of virtual memory.");
- ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
- "Print inlined frames in stacktraces. Defaults to true.");
- ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
- "Format string used to render stack frames. "
- "See sanitizer_stacktrace_printer.h for the format description. "
- "Use DEFAULT to get default format.");
-
- // Do a sanity check for certain flags.
- if (f->malloc_context_size < 1)
- f->malloc_context_size = 1;
+void CommonFlags::CopyFrom(const CommonFlags &other) {
+ internal_memcpy(this, &other, sizeof(*this));
}
-static bool GetFlagValue(const char *env, const char *name,
- const char **value, int *value_length) {
- if (env == 0)
- return false;
- const char *pos = 0;
- for (;;) {
- pos = internal_strstr(env, name);
- if (pos == 0)
+class FlagHandlerInclude : public FlagHandlerBase {
+ static const uptr kMaxIncludeSize = 1 << 15;
+ FlagParser *parser_;
+
+ public:
+ explicit FlagHandlerInclude(FlagParser *parser) : parser_(parser) {}
+ bool Parse(const char *value) final {
+ char *data;
+ uptr data_mapped_size;
+ int err;
+ uptr len =
+ ReadFileToBuffer(value, &data, &data_mapped_size,
+ Max(kMaxIncludeSize, GetPageSizeCached()), &err);
+ if (!len) {
+ Printf("Failed to read options from '%s': error %d\n", value, err);
return false;
- const char *name_end = pos + internal_strlen(name);
- if ((pos != env &&
- ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
- *name_end != '=') {
- // Seems to be middle of another flag name or value.
- env = pos + 1;
- continue;
}
- pos = name_end;
- break;
+ parser_->ParseString(data);
+ UnmapOrDie(data, data_mapped_size);
+ return true;
}
- const char *end;
- if (pos[0] != '=') {
- end = pos;
- } else {
- pos += 1;
- if (pos[0] == '"') {
- pos += 1;
- end = internal_strchr(pos, '"');
- } else if (pos[0] == '\'') {
- pos += 1;
- end = internal_strchr(pos, '\'');
- } else {
- // Read until the next space or colon.
- end = pos + internal_strcspn(pos, " :");
- }
- if (end == 0)
- end = pos + internal_strlen(pos);
- }
- *value = pos;
- *value_length = end - pos;
- return true;
+};
+
+void RegisterIncludeFlag(FlagParser *parser, CommonFlags *cf) {
+ FlagHandlerInclude *fh_include =
+ new (FlagParser::Alloc) FlagHandlerInclude(parser); // NOLINT
+ parser->RegisterHandler("include", fh_include,
+ "read more options from the given file");
}
-static bool StartsWith(const char *flag, int flag_length, const char *value) {
- if (!flag || !value)
- return false;
- int value_length = internal_strlen(value);
- return (flag_length >= value_length) &&
- (0 == internal_strncmp(flag, value, value_length));
-}
+void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &cf->Name);
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
-static LowLevelAllocator allocator_for_flags;
-
-// The linear scan is suboptimal, but the number of flags is relatively small.
-bool FlagInDescriptionList(const char *name) {
- IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
- while (it.hasNext()) {
- if (!internal_strcmp(it.next()->name, name)) return true;
- }
- return false;
-}
-
-void AddFlagDescription(const char *name, const char *description) {
- if (FlagInDescriptionList(name)) return;
- FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
- new_description->name = name;
- new_description->description = description;
- flag_descriptions.push_back(new_description);
-}
-
-// TODO(glider): put the descriptions inside CommonFlags.
-void PrintFlagDescriptions() {
- IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
- Printf("Available flags for %s:\n", SanitizerToolName);
- while (it.hasNext()) {
- FlagDescription *descr = it.next();
- Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
- }
-}
-
-void ParseFlag(const char *env, bool *flag,
- const char *name, const char *descr) {
- const char *value;
- int value_length;
- AddFlagDescription(name, descr);
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- if (StartsWith(value, value_length, "0") ||
- StartsWith(value, value_length, "no") ||
- StartsWith(value, value_length, "false"))
- *flag = false;
- if (StartsWith(value, value_length, "1") ||
- StartsWith(value, value_length, "yes") ||
- StartsWith(value, value_length, "true"))
- *flag = true;
-}
-
-void ParseFlag(const char *env, int *flag,
- const char *name, const char *descr) {
- const char *value;
- int value_length;
- AddFlagDescription(name, descr);
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- *flag = static_cast<int>(internal_atoll(value));
-}
-
-void ParseFlag(const char *env, uptr *flag,
- const char *name, const char *descr) {
- const char *value;
- int value_length;
- AddFlagDescription(name, descr);
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- *flag = static_cast<uptr>(internal_atoll(value));
-}
-
-void ParseFlag(const char *env, const char **flag,
- const char *name, const char *descr) {
- const char *value;
- int value_length;
- AddFlagDescription(name, descr);
- if (!GetFlagValue(env, name, &value, &value_length))
- return;
- // Copy the flag value. Don't use locks here, as flags are parsed at
- // tool startup.
- char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
- internal_memcpy(value_copy, value, value_length);
- value_copy[value_length] = '\0';
- *flag = value_copy;
+ RegisterIncludeFlag(parser, cf);
}
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_flags.h b/lib/sanitizer_common/sanitizer_flags.h
index 4791397..fda6d71 100644
--- a/lib/sanitizer_common/sanitizer_flags.h
+++ b/lib/sanitizer_common/sanitizer_flags.h
@@ -18,62 +18,38 @@
namespace __sanitizer {
-void ParseFlag(const char *env, bool *flag,
- const char *name, const char *descr);
-void ParseFlag(const char *env, int *flag,
- const char *name, const char *descr);
-void ParseFlag(const char *env, uptr *flag,
- const char *name, const char *descr);
-void ParseFlag(const char *env, const char **flag,
- const char *name, const char *descr);
-
struct CommonFlags {
- bool symbolize;
- const char *external_symbolizer_path;
- bool allow_addr2line;
- const char *strip_path_prefix;
- bool fast_unwind_on_check;
- bool fast_unwind_on_fatal;
- bool fast_unwind_on_malloc;
- bool handle_ioctl;
- int malloc_context_size;
- const char *log_path;
- int verbosity;
- bool detect_leaks;
- bool leak_check_at_exit;
- bool allocator_may_return_null;
- bool print_summary;
- bool check_printf;
- bool handle_segv;
- bool allow_user_segv_handler;
- bool use_sigaltstack;
- bool detect_deadlocks;
- uptr clear_shadow_mmap_threshold;
- const char *color;
- bool legacy_pthread_cond;
- bool intercept_tls_get_addr;
- bool help;
- uptr mmap_limit_mb;
- bool coverage;
- bool coverage_direct;
- const char *coverage_dir;
- bool full_address_space;
- const char *suppressions;
- bool print_suppressions;
- bool disable_coredump;
- bool symbolize_inline_frames;
- const char *stack_trace_format;
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ void SetDefaults();
+ void CopyFrom(const CommonFlags &other);
};
-inline CommonFlags *common_flags() {
- extern CommonFlags common_flags_dont_use;
+// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
+extern CommonFlags common_flags_dont_use;
+inline const CommonFlags *common_flags() {
return &common_flags_dont_use;
}
-void SetCommonFlagsDefaults(CommonFlags *f);
-void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
-void PrintFlagDescriptions();
+inline void SetCommonFlagsDefaults() {
+ common_flags_dont_use.SetDefaults();
+}
+// This function can only be used to setup tool-specific overrides for
+// CommonFlags defaults. Generally, it should only be used right after
+// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
+// only during the flags initialization (i.e. before they are used for
+// the first time).
+inline void OverrideCommonFlags(const CommonFlags &cf) {
+ common_flags_dont_use.CopyFrom(cf);
+}
+
+class FlagParser;
+void RegisterCommonFlags(FlagParser *parser,
+ CommonFlags *cf = &common_flags_dont_use);
+void RegisterIncludeFlag(FlagParser *parser, CommonFlags *cf);
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H
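
A sketch of the initialization order described in the comment above; the overridden values and the environment variable are illustrative only.

#include "sanitizer_common.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"

namespace __tool {

void InitializeFlags() {
  using namespace __sanitizer;

  // 1. Baseline defaults for every common flag.
  SetCommonFlagsDefaults();

  // 2. Tool-specific overrides of those defaults, applied before any
  //    user-supplied options are parsed (illustrative values).
  CommonFlags cf;
  cf.CopyFrom(*common_flags());
  cf.detect_leaks = false;
  cf.malloc_context_size = 30;
  OverrideCommonFlags(cf);

  // 3. Only now parse user options, which take precedence over both.
  FlagParser parser;
  RegisterCommonFlags(&parser);
  parser.ParseString(GetEnv("TOOL_OPTIONS"));  // hypothetical variable name
}

}  // namespace __tool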
diff --git a/lib/sanitizer_common/sanitizer_flags.inc b/lib/sanitizer_common/sanitizer_flags.inc
new file mode 100644
index 0000000..58f7f37
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_flags.inc
@@ -0,0 +1,148 @@
+//===-- sanitizer_flags.inc -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common flags available in all sanitizers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef COMMON_FLAG
+#error "Define COMMON_FLAG prior to including this file!"
+#endif
+
+// COMMON_FLAG(Type, Name, DefaultValue, Description)
+// Supported types: bool, const char *, int, uptr.
+// Default value must be a compile-time constant.
+// Description must be a string literal.
+
+COMMON_FLAG(
+ bool, symbolize, true,
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.")
+COMMON_FLAG(
+ const char *, external_symbolizer_path, 0,
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.")
+COMMON_FLAG(
+ bool, allow_addr2line, false,
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable.)")
+COMMON_FLAG(const char *, strip_path_prefix, "",
+ "Strips this prefix from file paths in error reports.")
+COMMON_FLAG(bool, fast_unwind_on_check, false,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "internal CHECK failures.")
+COMMON_FLAG(bool, fast_unwind_on_fatal, false,
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.")
+COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.")
+COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
+COMMON_FLAG(int, malloc_context_size, 1,
+ "Max number of stack frames kept for each allocation/deallocation.")
+COMMON_FLAG(
+ const char *, log_path, "stderr",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".")
+COMMON_FLAG(
+ int, verbosity, 0,
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
+COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
+COMMON_FLAG(
+ bool, leak_check_at_exit, true,
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.")
+COMMON_FLAG(bool, allocator_may_return_null, false,
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.")
+COMMON_FLAG(bool, print_summary, true,
+ "If false, disable printing error summaries in addition to error "
+ "reports.")
+COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
+COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+ "If set, registers the tool's custom SEGV handler (both SIGBUS and "
+ "SIGSEGV on OSX).")
+COMMON_FLAG(bool, allow_user_segv_handler, false,
+ "If set, allows user to register a SEGV handler even if the tool "
+ "registers one.")
+COMMON_FLAG(bool, use_sigaltstack, true,
+ "If set, uses alternate stack for signal handling.")
+COMMON_FLAG(bool, detect_deadlocks, false,
+ "If set, deadlock detection is enabled.")
+COMMON_FLAG(
+ uptr, clear_shadow_mmap_threshold, 64 * 1024,
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.")
+COMMON_FLAG(const char *, color, "auto",
+ "Colorize reports: (always|never|auto).")
+COMMON_FLAG(
+ bool, legacy_pthread_cond, false,
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.")
+COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
+COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
+COMMON_FLAG(uptr, mmap_limit_mb, 0,
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+ "not a user-facing flag, used mostly for testing the tools")
+COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
+ "Hard RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS and aborts the process if the"
+ " limit is reached")
+COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
+ "Soft RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS. If the limit is reached"
+ " all subsequent malloc/new calls will fail or return NULL"
+ " (depending on the value of allocator_may_return_null)"
+ " until the RSS goes below the soft limit."
+ " This limit does not affect memory allocations other than"
+ " malloc/new.")
+COMMON_FLAG(bool, can_use_proc_maps_statm, true,
+ "If false, do not attempt to read /proc/maps/statm."
+ " Mostly useful for testing sanitizers.")
+COMMON_FLAG(
+ bool, coverage, false,
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).")
+// On by default, but works only if coverage == true.
+COMMON_FLAG(bool, coverage_pcs, true,
+ "If set (and if 'coverage' is set too), the coverage information "
+ "will be dumped as a set of PC offsets for every module.")
+COMMON_FLAG(bool, coverage_bitset, false,
+ "If set (and if 'coverage' is set too), the coverage information "
+ "will also be dumped as a bitset to a separate file.")
+COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
+ "If set, coverage information will be dumped directly to a memory "
+ "mapped file. This way data is not lost even if the process is "
+ "suddenly killed.")
+COMMON_FLAG(const char *, coverage_dir, ".",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.")
+COMMON_FLAG(bool, full_address_space, false,
+ "Sanitize complete address space; "
+ "by default kernel area on 32-bit platforms will not be sanitized")
+COMMON_FLAG(bool, print_suppressions, true,
+ "Print matched suppressions at exit.")
+COMMON_FLAG(
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
+ "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
+ "dumping a 16T+ core file. Ignored on OSes that don't dump core by "
+ "default and for sanitizers that don't reserve lots of virtual memory.")
+COMMON_FLAG(bool, use_madv_dontdump, true,
+ "If set, instructs kernel to not store the (huge) shadow "
+ "in core file.")
+COMMON_FLAG(bool, symbolize_inline_frames, true,
+ "Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
+ "Format string used to render stack frames. "
+ "See sanitizer_stacktrace_printer.h for the format description. "
+ "Use DEFAULT to get default format.")
+COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
+ "If true, the shadow is not allowed to use huge pages. ")
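The RSS-limit, coverage and report flags above are common flags, so they can be set at run time through the tool's options string. A minimal sketch of how a program built with ASan might pin some of them (assuming the usual __asan_default_options hook; the same flag=value pairs also work in ASAN_OPTIONS):

  // Sketch only: default options returned to the ASan runtime at startup.
  // coverage=1 still requires coverage instrumentation at compile time.
  extern "C" const char *__asan_default_options() {
    return "hard_rss_limit_mb=4096:soft_rss_limit_mb=3072:coverage=1";
  }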
diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h
new file mode 100644
index 0000000..94d9f4e
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -0,0 +1,58 @@
+//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// This header declares the sanitizer runtime interface functions.
+// The runtime library has to define these functions so that the instrumented
+// program can call them.
+//
+// See also include/sanitizer/common_interface_defs.h
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERFACE_INTERNAL_H
+#define SANITIZER_INTERFACE_INTERNAL_H
+
+#include "sanitizer_internal_defs.h"
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ // The special values are "stdout" and "stderr".
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_path(const char *path);
+
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_report_error_summary(const char *error_summary);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+} // extern "C"
+
+#endif // SANITIZER_INTERFACE_INTERNAL_H
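A short sketch of how an instrumented program could use the interface declared in this new header, via the public include/sanitizer/common_interface_defs.h wrapper mentioned above (the report path and hook body are illustrative assumptions):

  #include <sanitizer/common_interface_defs.h>
  #include <stdio.h>

  // Overrides the weak default; called by the runtime right after a report.
  extern "C" void __sanitizer_report_error_summary(const char *summary) {
    fprintf(stderr, "tool summary: %s\n", summary);
  }

  int main() {
    // Reports now go to "/tmp/tool_report.<pid>" instead of stderr.
    __sanitizer_set_report_path("/tmp/tool_report");
    // ... rest of the program ...
  }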
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index d77ca8f..e7737bd 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -15,6 +15,10 @@
#include "sanitizer_platform.h"
+#ifndef SANITIZER_DEBUG
+# define SANITIZER_DEBUG 0
+#endif
+
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
@@ -28,7 +32,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if SANITIZER_LINUX && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@@ -81,8 +85,9 @@
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
-// Mac and Linux/x86-64 are special.
-#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
+// FreeBSD, Mac and Linux/x86-64 are special.
+#if SANITIZER_FREEBSD || SANITIZER_MAC || \
+ (SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
@@ -96,41 +101,6 @@
#endif
} // namespace __sanitizer
-extern "C" {
- // Tell the tools to write their reports to "path.<pid>" instead of stderr.
- // The special values are "stdout" and "stderr".
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_path(const char *path);
-
- typedef struct {
- int coverage_sandboxed;
- __sanitizer::sptr coverage_fd;
- unsigned int coverage_max_block_size;
- } __sanitizer_sandbox_arguments;
-
- // Notify the tools that the sandbox is going to be turned on.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
-
- // This function is called by the tool when it has just finished reporting
- // an error. 'error_summary' is a one-line string that summarizes
- // the error message. This function can be overridden by the client.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_report_error_summary(const char *error_summary);
-
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov();
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid);
- SANITIZER_INTERFACE_ATTRIBUTE
- int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
- const void *end);
-} // extern "C"
-
using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
@@ -240,7 +210,7 @@
#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
#define DCHECK(a) CHECK(a)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
@@ -320,4 +290,12 @@
} while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
}
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ (void)enable_fp; \
+ } while (0)
+
#endif // SANITIZER_DEFS_H
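The new ENABLE_FRAME_POINTER macro is meant for functions that must stay walkable by the fast, frame-pointer-based unwinder even when the runtime is built with frame-pointer omission. A usage sketch (the function name is hypothetical):

  void ReportSlowPath() {
    // Reading GET_CURRENT_FRAME() into a volatile forces the compiler to
    // materialize a frame pointer for this function.
    ENABLE_FRAME_POINTER;
    // ... code whose stack frame should be visible to fast unwinds ...
  }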
diff --git a/lib/sanitizer_common/sanitizer_libc.cc b/lib/sanitizer_common/sanitizer_libc.cc
index 8d8ad59..cb162a4 100644
--- a/lib/sanitizer_common/sanitizer_libc.cc
+++ b/lib/sanitizer_common/sanitizer_libc.cc
@@ -16,16 +16,6 @@
namespace __sanitizer {
-// Make the compiler think that something is going on there.
-static inline void break_optimization(void *arg) {
-#if _MSC_VER
- // FIXME: make sure this is actually enough.
- __asm;
-#else
- __asm__ __volatile__("" : : "r" (arg) : "memory");
-#endif
-}
-
s64 internal_atoll(const char *nptr) {
return internal_simple_strtoll(nptr, (char**)0, 10);
}
@@ -38,6 +28,15 @@
return 0;
}
+void *internal_memrchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ void *res = nullptr;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
+ }
+ return res;
+}
+
int internal_memcmp(const void* s1, const void* s2, uptr n) {
const char *t1 = (const char *)s1;
const char *t2 = (const char *)s2;
@@ -78,7 +77,7 @@
CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
p->a = p->b = 0;
- break_optimization(0); // Make sure this does not become memset.
+ SanitizerBreakOptimization(0); // Make sure this does not become memset.
}
}
@@ -111,6 +110,14 @@
return s2;
}
+char* internal_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
int internal_strcmp(const char *s1, const char *s2) {
while (true) {
unsigned c1 = *s1;
diff --git a/lib/sanitizer_common/sanitizer_libc.h b/lib/sanitizer_common/sanitizer_libc.h
index 6995626..c086b8a 100644
--- a/lib/sanitizer_common/sanitizer_libc.h
+++ b/lib/sanitizer_common/sanitizer_libc.h
@@ -26,6 +26,7 @@
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
+void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
@@ -38,6 +39,7 @@
int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
+char *internal_strndup(const char *s, uptr n);
uptr internal_strlen(const char *s);
char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n);
@@ -98,6 +100,25 @@
// Threading
uptr internal_sched_yield();
+// These functions call appropriate pthread_ functions directly, bypassing
+// the interceptor. They are weak and may not be present in some tools.
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
+ void *param);
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_join(void *th, void **ret);
+
+#define DEFINE_REAL_PTHREAD_FUNCTIONS \
+ namespace __sanitizer { \
+ int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
+ void *param) { \
+ return REAL(pthread_create)(th, attr, callback, param); \
+ } \
+ int real_pthread_join(void *th, void **ret) { \
+    return REAL(pthread_join)(th, ret); \
+ } \
+ } // namespace __sanitizer
+
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);
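A sketch of how a tool that already intercepts pthread_create/pthread_join would provide these symbols (this assumes the usual INTERCEPTOR/REAL machinery from interception.h and omits tool-specific bookkeeping):

  INTERCEPTOR(int, pthread_create, void *th, void *attr,
              void *(*callback)(void *), void *param) {
    // ... tool bookkeeping ...
    return REAL(pthread_create)(th, attr, callback, param);
  }
  INTERCEPTOR(int, pthread_join, void *th, void **ret) {
    return REAL(pthread_join)(th, ret);
  }
  // Emits __sanitizer::real_pthread_create/real_pthread_join wrappers so that
  // sanitizer_common helpers can bypass the interceptors above.
  DEFINE_REAL_PTHREAD_FUNCTIONS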
diff --git a/lib/sanitizer_common/sanitizer_libignore.cc b/lib/sanitizer_common/sanitizer_libignore.cc
index 44e4529..cefb1dc 100644
--- a/lib/sanitizer_common/sanitizer_libignore.cc
+++ b/lib/sanitizer_common/sanitizer_libignore.cc
@@ -19,32 +19,26 @@
LibIgnore::LibIgnore(LinkerInitialized) {
}
-void LibIgnore::Init(const SuppressionContext &supp) {
+void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
BlockingMutexLock lock(&mutex_);
- CHECK_EQ(count_, 0);
- const uptr n = supp.SuppressionCount();
- for (uptr i = 0; i < n; i++) {
- const Suppression *s = supp.SuppressionAt(i);
- if (s->type != SuppressionLib)
- continue;
- if (count_ >= kMaxLibs) {
- Report("%s: too many called_from_lib suppressions (max: %d)\n",
- SanitizerToolName, kMaxLibs);
- Die();
- }
- Lib *lib = &libs_[count_++];
- lib->templ = internal_strdup(s->templ);
- lib->name = 0;
- lib->loaded = false;
+ if (count_ >= kMaxLibs) {
+ Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ kMaxLibs);
+ Die();
}
+ Lib *lib = &libs_[count_++];
+ lib->templ = internal_strdup(name_templ);
+ lib->name = nullptr;
+ lib->real_name = nullptr;
+ lib->loaded = false;
}
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target.
- InternalScopedBuffer<char> buf(4096);
+ InternalScopedString buf(kMaxPathLength);
if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
- buf.data()[0]) {
+ buf[0]) {
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
if (!lib->loaded && lib->real_name == 0 &&
@@ -55,7 +49,7 @@
// Scan suppressions list and find newly loaded and unloaded libraries.
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedBuffer<char> module(4096);
+ InternalScopedString module(kMaxPathLength);
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
diff --git a/lib/sanitizer_common/sanitizer_libignore.h b/lib/sanitizer_common/sanitizer_libignore.h
index 8e1d584..cd56c36 100644
--- a/lib/sanitizer_common/sanitizer_libignore.h
+++ b/lib/sanitizer_common/sanitizer_libignore.h
@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
//
// LibIgnore allows to ignore all interceptors called from a particular set
-// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
-// from the provided SuppressionContext; finds code ranges for the libraries;
+// of dynamic libraries. LibIgnore can be initialized with several templates
+// of names of libraries to be ignored. It finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//
@@ -19,7 +19,6 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
-#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"
@@ -29,8 +28,8 @@
public:
explicit LibIgnore(LinkerInitialized);
- // Fetches all "called_from_lib" suppressions from the SuppressionContext.
- void Init(const SuppressionContext &supp);
+ // Must be called during initialization.
+ void AddIgnoredLibrary(const char *name_templ);
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index acae5bb..8029181 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -15,6 +15,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
@@ -30,6 +31,17 @@
#include <asm/param.h>
#endif
+// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
+// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
+// access stat from asm/stat.h without conflicting with the definition in
+// sys/stat.h, we use this trick.
+#if defined(__mips64)
+#include <sys/types.h>
+#define stat kernel_stat
+#include <asm/stat.h>
+#undef stat
+#endif
+
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
@@ -97,14 +109,16 @@
#endif
// --------------- sanitizer_libc.h
-uptr internal_mmap(void *addr, uptr length, int prot, int flags,
- int fd, u64 offset) {
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ u64 offset) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
+ // mmap2 specifies file offset in 4096-byte units.
+ CHECK(IsAligned(offset, 4096));
return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
- offset);
+ offset / 4096);
#endif
}
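With this change a 32-bit caller must hand internal_mmap a page-aligned byte offset; the wrapper converts it to the 4096-byte units mmap2 expects. A hedged example (fd is assumed to be an already-open descriptor):

  // Maps 64 KB of the file starting at byte offset 8192; the offset satisfies
  // the new CHECK and is passed to mmap2 as 8192 / 4096 = 2 pages.
  uptr p = internal_mmap(nullptr, 64 << 10, PROT_READ, MAP_PRIVATE, fd, 8192);
  if (internal_iserror(p))
    Report("internal_mmap failed\n");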
@@ -178,6 +192,26 @@
}
#endif
+#if defined(__mips64)
+static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+ out->st_atime = in->st_atime_nsec;
+ out->st_mtime = in->st_mtime_nsec;
+ out->st_ctime = in->st_ctime_nsec;
+ out->st_ino = in->st_ino;
+}
+#endif
+
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(stat), path, buf);
@@ -185,7 +219,15 @@
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, 0);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if defined(__mips64)
+ // For mips64, stat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(stat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
+# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
@@ -283,17 +325,15 @@
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
+ struct stat st;
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
- struct stat st;
if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
- return false;
#else
- struct stat st;
if (internal_stat(filename, &st))
+#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
-#endif
}
uptr GetTid() {
@@ -409,32 +449,18 @@
Die();
}
-// Stub implementation of GetThreadStackAndTls for Go.
-#if SANITIZER_GO
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
- *stk_addr = 0;
- *stk_size = 0;
- *tls_addr = 0;
- *tls_size = 0;
-}
-#endif // SANITIZER_GO
-
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
MtxSleeping = 2
};
-BlockingMutex::BlockingMutex(LinkerInitialized) {
- CHECK_EQ(owner_, 0);
-}
-
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
@@ -734,6 +760,7 @@
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+ CHECK_NE(map, nullptr);
#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
@@ -833,6 +860,13 @@
: "rsp", "memory", "r11", "rcx");
return res;
}
+#elif defined(__mips__)
+// TODO(sagarthakur): clone function is to be rewritten in assembly.
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ return clone(fn, child_stack, flags, arg, parent_tidptr,
+ newtls, child_tidptr);
+}
#endif // defined(__x86_64__) && SANITIZER_LINUX
#if SANITIZER_ANDROID
@@ -870,9 +904,30 @@
#endif
bool IsDeadlySignal(int signum) {
- return (signum == SIGSEGV) && common_flags()->handle_segv;
+ return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
+#ifndef SANITIZER_GO
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ void *th;
+ real_pthread_create(&th, 0, (void*(*)(void *arg))func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ real_pthread_join(th, 0);
+}
+#else
+void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
+
+void internal_join_thread(void *th) {}
+#endif
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
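internal_start_thread/internal_join_thread give sanitizer_common a way to spawn helper threads (for example the RSS watcher implied by hard_rss_limit_mb and soft_rss_limit_mb) without going through the pthread interceptor and with all signals blocked during creation. A hypothetical use, not part of this patch (GetRSS and SleepForMillis are existing sanitizer_common helpers):

  // Hypothetical watcher body; a real one would also have an exit condition.
  static void BackgroundWatcher(void *arg) {
    (void)arg;
    for (;;) {
      // Compare GetRSS() against hard_rss_limit_mb / soft_rss_limit_mb here.
      SleepForMillis(100);
    }
  }

  // During tool initialization:
  void *watcher = internal_start_thread(BackgroundWatcher, nullptr);
  // internal_join_thread(watcher) would reclaim it if the loop ever exits.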
diff --git a/lib/sanitizer_common/sanitizer_linux.h b/lib/sanitizer_common/sanitizer_linux.h
index 3013c25..b2e603d 100644
--- a/lib/sanitizer_common/sanitizer_linux.h
+++ b/lib/sanitizer_common/sanitizer_linux.h
@@ -43,7 +43,7 @@
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
diff --git a/lib/sanitizer_common/sanitizer_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
index 2ca55b4..c71b625 100644
--- a/lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -58,8 +58,10 @@
} // extern "C"
static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
- if (real_pthread_attr_getstack)
+#if !SANITIZER_GO
+ if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
+#endif
return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
}
@@ -67,8 +69,10 @@
real_sigaction(int signum, const void *act, void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
- if (real_sigaction)
+#if !SANITIZER_GO
+ if (&real_sigaction)
return real_sigaction(signum, act, oldact);
+#endif
return sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
}
@@ -120,6 +124,7 @@
*stack_bottom = (uptr)stackaddr;
}
+#if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
@@ -130,6 +135,7 @@
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
+#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
@@ -162,8 +168,22 @@
# define DL_INTERNAL_FUNCTION
#endif
+#if defined(__mips__)
+// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
+// head structure. It lies before the static tls blocks.
+static uptr TlsPreTcbSize() {
+ const uptr kTcbHead = 16;
+ const uptr kTlsAlign = 16;
+ const uptr kTlsPreTcbSize =
+ (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
+ InitTlsSize();
+ g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
+ return kTlsPreTcbSize;
+}
+#endif
+
void InitTlsSize() {
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@@ -178,7 +198,8 @@
#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
}
-#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
+#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__)) \
+ && SANITIZER_LINUX
// sizeof(struct thread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@@ -186,6 +207,7 @@
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
+#if defined(__x86_64__) || defined(__i386__)
#ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
@@ -208,6 +230,8 @@
val = FIRST_32_SECOND_64(1168, 1776);
else if (minor <= 12)
val = FIRST_32_SECOND_64(1168, 2288);
+ else if (minor == 13)
+ val = FIRST_32_SECOND_64(1168, 2304);
else
val = FIRST_32_SECOND_64(1216, 2304);
}
@@ -216,6 +240,13 @@
return val;
}
#endif
+#elif defined(__mips__)
+ // TODO(sagarthakur): add more values as per different glibc versions.
+ val = FIRST_32_SECOND_64(1152, 1776);
+ if (val)
+ atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
+ return val;
+#endif
return 0;
}
@@ -232,12 +263,24 @@
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
+# elif defined(__mips__)
+ // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
+ // points to the end of the TCB + 0x7000. The pthread_descr structure is
+ // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
+ // TCB and the size of pthread_descr.
+ const uptr kTlsTcbOffset = 0x7000;
+ uptr thread_pointer;
+ asm volatile(".set push;\
+ .set mips64r2;\
+ rdhwr %0,$29;\
+ .set pop" : "=r" (thread_pointer));
+ descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# else
# error "unsupported CPU arch"
# endif
return descr_addr;
}
-#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
+#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
@@ -259,6 +302,7 @@
}
#endif // SANITIZER_FREEBSD
+#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX
# if defined(__x86_64__) || defined(__i386__)
@@ -266,6 +310,9 @@
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
+# elif defined(__mips__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
# else
*addr = 0;
*size = 0;
@@ -287,7 +334,9 @@
# error "Unknown OS"
#endif
}
+#endif
+#if !SANITIZER_GO
uptr GetTlsSize() {
#if SANITIZER_FREEBSD
uptr addr, size;
@@ -297,9 +346,14 @@
return g_tls_size;
#endif
}
+#endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
+#if SANITIZER_GO
+ // Stub implementation for Go.
+ *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
+#else
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
@@ -316,6 +370,7 @@
*tls_addr = *stk_addr + *stk_size;
}
}
+#endif
}
void AdjustStackSize(void *attr_) {
@@ -370,16 +425,15 @@
DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
if (data->current_n == data->max_n)
return 0;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- module_name.data()[0] = '\0';
+ InternalScopedString module_name(kMaxPathLength);
if (data->first) {
data->first = false;
// First module is the binary itself.
ReadBinaryName(module_name.data(), module_name.size());
} else if (info->dlpi_name) {
- internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
+ module_name.append("%s", info->dlpi_name);
}
- if (module_name.data()[0] == '\0')
+ if (module_name[0] == '\0')
return 0;
if (data->filter && !data->filter(module_name.data()))
return 0;
@@ -421,6 +475,45 @@
#endif
}
+// getrusage does not give us the current RSS, only the max RSS.
+// Still, this is better than nothing if /proc/self/statm is not available
+// for some reason, e.g. due to a sandbox.
+static uptr GetRSSFromGetrusage() {
+ struct rusage usage;
+ if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox.
+ return 0;
+ return usage.ru_maxrss << 10; // ru_maxrss is in Kb.
+}
+
+uptr GetRSS() {
+ if (!common_flags()->can_use_proc_maps_statm)
+ return GetRSSFromGetrusage();
+ uptr fd = OpenFile("/proc/self/statm", false);
+ if ((sptr)fd < 0)
+ return GetRSSFromGetrusage();
+ char buf[64];
+ uptr len = internal_read(fd, buf, sizeof(buf) - 1);
+ internal_close(fd);
+ if ((sptr)len <= 0)
+ return 0;
+ buf[len] = 0;
+ // The format of the file is:
+ // 1084 89 69 11 0 79 0
+ // We need the second number which is RSS in pages.
+ char *pos = buf;
+ // Skip the first number.
+ while (*pos >= '0' && *pos <= '9')
+ pos++;
+ // Skip whitespaces.
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
+ pos++;
+ // Read the number.
+ uptr rss = 0;
+ while (*pos >= '0' && *pos <= '9')
+ rss = rss * 10 + *pos++ - '0';
+ return rss * GetPageSizeCached();
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
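A worked example of the GetRSS() parsing above, run stand-alone on the sample /proc/self/statm line from the comment (a 4096-byte page size is an assumption):

  const char *buf = "1084 89 69 11 0 79 0";
  const char *pos = buf;
  while (*pos >= '0' && *pos <= '9') pos++;                  // skip "1084"
  while (*pos && !(*pos >= '0' && *pos <= '9')) pos++;       // skip the space
  unsigned long rss_pages = 0;
  while (*pos >= '0' && *pos <= '9') rss_pages = rss_pages * 10 + *pos++ - '0';
  // rss_pages == 89, so GetRSS() would return 89 * 4096 = 364544 bytes.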
diff --git a/lib/sanitizer_common/sanitizer_list.h b/lib/sanitizer_common/sanitizer_list.h
index a47bc7d..6dd9c8f 100644
--- a/lib/sanitizer_common/sanitizer_list.h
+++ b/lib/sanitizer_common/sanitizer_list.h
@@ -115,21 +115,25 @@
}
}
- class Iterator {
+ template<class ListTy, class ItemTy>
+ class IteratorBase {
public:
- explicit Iterator(IntrusiveList<Item> *list)
+ explicit IteratorBase(ListTy *list)
: list_(list), current_(list->first_) { }
- Item *next() {
- Item *ret = current_;
+ ItemTy *next() {
+ ItemTy *ret = current_;
if (current_) current_ = current_->next;
return ret;
}
bool hasNext() const { return current_ != 0; }
private:
- IntrusiveList<Item> *list_;
- Item *current_;
+ ListTy *list_;
+ ItemTy *current_;
};
+ typedef IteratorBase<IntrusiveList<Item>, Item> Iterator;
+ typedef IteratorBase<const IntrusiveList<Item>, const Item> ConstIterator;
+
// private, don't use directly.
uptr size_;
Item *first_;
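The new ConstIterator makes it possible to walk an IntrusiveList through a const reference. A small sketch with a hypothetical node type:

  struct Node {
    Node *next;   // IntrusiveList requires an embedded 'next' pointer.
    uptr value;
  };

  uptr Sum(const IntrusiveList<Node> &list) {
    uptr sum = 0;
    for (IntrusiveList<Node>::ConstIterator it(&list); it.hasNext();)
      sum += it.next()->value;
    return sum;
  }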
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index 1b77087..39a5c7e 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -109,6 +109,10 @@
return readlink(path, buf, bufsize);
}
+uptr internal_unlink(const char *path) {
+ return unlink(path);
+}
+
uptr internal_sched_yield() {
return sched_yield();
}
@@ -213,10 +217,6 @@
return sysconf(_SC_PAGESIZE);
}
-BlockingMutex::BlockingMutex(LinkerInitialized) {
- // We assume that OS_SPINLOCK_INIT is zero
-}
-
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
@@ -298,7 +298,11 @@
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
case '4': return MACOS_VERSION_YOSEMITE;
- default: return MACOS_VERSION_UNKNOWN;
+ default:
+ if (IsDigit(version[1]))
+ return MACOS_VERSION_UNKNOWN_NEWER;
+ else
+ return MACOS_VERSION_UNKNOWN;
}
}
default: return MACOS_VERSION_UNKNOWN;
@@ -317,6 +321,13 @@
return result;
}
+uptr GetRSS() {
+ return 0;
+}
+
+void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) { }
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/lib/sanitizer_common/sanitizer_mac.h b/lib/sanitizer_common/sanitizer_mac.h
index 3ed0ed3..9eed905 100644
--- a/lib/sanitizer_common/sanitizer_mac.h
+++ b/lib/sanitizer_common/sanitizer_mac.h
@@ -27,6 +27,7 @@
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS,
MACOS_VERSION_YOSEMITE,
+ MACOS_VERSION_UNKNOWN_NEWER
};
MacosVersion GetMacosVersion();
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index c7589f7..d06fc45 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -73,7 +73,13 @@
class BlockingMutex {
public:
+#if SANITIZER_WINDOWS
+ // Windows does not currently support LinkerInitialized
explicit BlockingMutex(LinkerInitialized);
+#else
+ explicit constexpr BlockingMutex(LinkerInitialized)
+ : opaque_storage_ {0, }, owner_(0) {}
+#endif
BlockingMutex();
void Lock();
void Unlock();
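On non-Windows targets the LinkerInitialized constructor is now constexpr, so a global BlockingMutex is constant-initialized and safe to lock before any static constructors run. Usage sketch (the names are illustrative):

  static BlockingMutex init_mu(LINKER_INITIALIZED);

  void InitOnce() {
    BlockingMutexLock l(&init_mu);
    // ... one-time initialization ...
  }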
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
index 6f8cd30..fef5a5b 100644
--- a/lib/sanitizer_common/sanitizer_platform.h
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -111,6 +111,19 @@
# endif
#endif
+// uid16 syscalls can only be used when the following conditions are
+// met:
+// * target is one of arm32, x86-32, sparc32, sh or m68k
+// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15
+// built against > linux-2.2 kernel headers
+// Since we don't want to include libc headers here, we check the
+// target only.
+#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
+#define SANITIZER_USES_UID16_SYSCALLS 1
+#else
+#define SANITIZER_USES_UID16_SYSCALLS 0
+#endif
+
#ifdef __mips__
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
diff --git a/lib/sanitizer_common/sanitizer_platform_interceptors.h b/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 95c2e9d..438ecba 100644
--- a/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -57,7 +57,7 @@
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MEMCHR 1
-#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX
+#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
@@ -70,7 +70,7 @@
#define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PREADV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
@@ -85,6 +85,7 @@
#ifndef SANITIZER_INTERCEPT_PRINTF
# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
+# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD
# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
#endif
@@ -93,12 +94,13 @@
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
@@ -109,10 +111,10 @@
#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
-#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
@@ -125,7 +127,7 @@
#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined (__x86_64)) // NOLINT
+ (defined(__i386) || defined (__x86_64) || defined (__mips64)) // NOLINT
#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
@@ -133,12 +135,15 @@
#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSNRTOMBS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_CONFSTR \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
@@ -147,7 +152,8 @@
#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WORDEXP \
+ SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
@@ -158,21 +164,22 @@
#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STATFS SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ETHER_HOST SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ETHER_HOST \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SHMCTL \
- (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
+ ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64)
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
- SI_MAC || SI_LINUX_NOT_ANDROID
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
@@ -193,7 +200,7 @@
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
+#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID
@@ -209,7 +216,8 @@
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
+ SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
index fc09522..2839e92 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -97,7 +97,6 @@
# include <sys/link_elf.h>
# include <netinet/ip_mroute.h>
# include <netinet/in.h>
-# include <netinet/ip_compat.h>
# include <net/ethernet.h>
# include <net/ppp_defs.h>
# include <glob.h>
@@ -117,6 +116,9 @@
#if SANITIZER_LINUX || SANITIZER_FREEBSD
# include <utime.h>
# include <sys/ptrace.h>
+# if defined(__mips64)
+# include <asm/ptrace.h>
+# endif
#endif
#if !SANITIZER_ANDROID
@@ -140,6 +142,9 @@
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
+#if defined(__mips64)
+# include <sys/procfs.h>
+#endif
#include <sys/user.h>
#include <sys/ustat.h>
#include <linux/cyclades.h>
@@ -284,14 +289,19 @@
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64))
+ (defined(__i386) || defined(__x86_64) || defined(__mips64))
+#if defined(__mips64)
+ unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
-#ifdef __x86_64
+#endif // __mips64
+#if (defined(__x86_64) || defined(__mips64))
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
-#endif
+#endif // __x86_64 || __mips64
int ptrace_peektext = PTRACE_PEEKTEXT;
int ptrace_peekdata = PTRACE_PEEKDATA;
@@ -1005,8 +1015,12 @@
CHECK_TYPE_SIZE(__kernel_uid_t);
CHECK_TYPE_SIZE(__kernel_gid_t);
+
+#if SANITIZER_USES_UID16_SYSCALLS
CHECK_TYPE_SIZE(__kernel_old_uid_t);
CHECK_TYPE_SIZE(__kernel_old_gid_t);
+#endif
+
CHECK_TYPE_SIZE(__kernel_off_t);
CHECK_TYPE_SIZE(__kernel_loff_t);
CHECK_TYPE_SIZE(__kernel_fd_set);
@@ -1057,7 +1071,13 @@
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
+/* On aarch64 glibc 2.20 and earlier provided incorrect mode field. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+#endif
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
diff --git a/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 80a3ddb..bd20bea 100644
--- a/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -18,6 +18,15 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
+// incorporates the map structure.
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
+#else
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
+#endif // !SANITIZER_FREEBSD
+
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
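GET_LINK_MAP_BY_DLOPEN_HANDLE hides the FreeBSD Obj_Entry layout behind one macro. A hedged sketch of the intended use in a dlopen interceptor (assumes <dlfcn.h>/<link.h>; the library name and callback are placeholders):

  void *handle = dlopen("libfoo.so", RTLD_NOW);
  if (handle) {
    link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
    if (map)
      ForEachMappedRegion(map, OnLibraryRegion);  // callback is hypothetical
  }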
@@ -169,7 +178,7 @@
unsigned __seq;
u64 __unused1;
u64 __unused2;
-#elif defined(__mips__)
+#elif defined(__mips__) || defined(__aarch64__)
unsigned int mode;
unsigned short __seq;
unsigned short __pad1;
@@ -538,6 +547,10 @@
#if SANITIZER_FREEBSD
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+#elif defined(__mips__)
+ struct __sanitizer_kernel_sigset_t {
+ u8 sig[16];
+ };
#else
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
@@ -686,7 +699,7 @@
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64))
+ (defined(__i386) || defined(__x86_64) || defined(__mips64))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index eb2497b..5bc41c2 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -30,6 +30,13 @@
#include <sys/personality.h>
#endif
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
namespace __sanitizer {
// ------------- sanitizer_common.h
@@ -78,16 +85,15 @@
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__)
+# if defined(__powerpc64__) || defined(__aarch64__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
// of the address space, so simply checking the stack address is not enough.
// This should (does) work for both PowerPC64 Endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
-# elif defined(__aarch64__)
- return (1ULL << 39) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# else
@@ -238,7 +244,8 @@
while (proc_maps.Next(&start, &end,
/*offset*/0, /*filename*/0, /*filename_size*/0,
/*protection*/0)) {
- if (!IntervalsAreSeparate(start, end, range_start, range_end))
+ CHECK_NE(0, end);
+ if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
return false;
}
return true;
@@ -286,45 +293,20 @@
return 0;
}
-void MaybeOpenReportFile() {
- if (!log_to_file) return;
- uptr pid = internal_getpid();
- // If in tracer, use the parent's file.
- if (pid == stoptheworld_tracer_pid)
- pid = stoptheworld_tracer_ppid;
- if (report_fd_pid == pid) return;
- InternalScopedBuffer<char> report_path_full(4096);
- internal_snprintf(report_path_full.data(), report_path_full.size(),
- "%s.%zu", report_path_prefix, pid);
- uptr openrv = OpenFile(report_path_full.data(), true);
- if (internal_iserror(openrv)) {
- report_fd = kStderrFd;
- log_to_file = false;
- Report("ERROR: Can't open file: %s\n", report_path_full.data());
- Die();
- }
- if (report_fd != kInvalidFd) {
- // We're in the child. Close the parent's log.
- internal_close(report_fd);
- }
- report_fd = openrv;
- report_fd_pid = pid;
-}
-
-void RawWrite(const char *buffer) {
- static const char *kRawWriteError =
- "RawWrite can't output requested buffer!\n";
- uptr length = (uptr)internal_strlen(buffer);
- MaybeOpenReportFile();
- if (length != internal_write(report_fd, buffer, length)) {
- internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ static const char *kWriteError =
+ "ReportFile::Write() can't output requested buffer!\n";
+ ReopenIfNecessary();
+ if (length != internal_write(fd, buffer, length)) {
+ internal_write(fd, kWriteError, internal_strlen(kWriteError));
Die();
}
}
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
uptr s, e, off, prot;
- InternalScopedString buff(4096);
+ InternalScopedString buff(kMaxPathLength);
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index ed1e372..11828e6 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -44,6 +44,18 @@
madvise((void*)addr, size, MADV_DONTNEED);
}
+void NoHugePagesInRegion(uptr addr, uptr size) {
+#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
+ madvise((void *)addr, size, MADV_NOHUGEPAGE);
+#endif // MADV_NOHUGEPAGE
+}
+
+void DontDumpShadowMemory(uptr addr, uptr length) {
+#ifdef MADV_DONTDUMP
+ madvise((void *)addr, length, MADV_DONTDUMP);
+#endif
+}
+
static rlim_t getlim(int res) {
rlimit rlim;
CHECK_EQ(0, getrlimit(res, &rlim));
diff --git a/lib/sanitizer_common/sanitizer_procmaps_common.cc b/lib/sanitizer_common/sanitizer_procmaps_common.cc
index 3b1a311..2ec08d7 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_common.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_common.cc
@@ -119,7 +119,7 @@
string_predicate_t filter) {
Reset();
uptr cur_beg, cur_end, cur_offset, prot;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
+ InternalScopedString module_name(kMaxPathLength);
uptr n_modules = 0;
for (uptr i = 0; n_modules < max_modules &&
Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
diff --git a/lib/sanitizer_common/sanitizer_procmaps_mac.cc b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
index 074b91a..c0a8614 100644
--- a/lib/sanitizer_common/sanitizer_procmaps_mac.cc
+++ b/lib/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -160,7 +160,7 @@
string_predicate_t filter) {
Reset();
uptr cur_beg, cur_end, prot;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
+ InternalScopedString module_name(kMaxPathLength);
uptr n_modules = 0;
for (uptr i = 0; n_modules < max_modules &&
Next(&cur_beg, &cur_end, 0, module_name.data(),
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index db4eb74..404d375 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -49,11 +49,14 @@
}
void Init(uptr size, uptr cache_size) {
- max_size_ = size;
- min_size_ = size / 10 * 9; // 90% of max size.
+ atomic_store(&max_size_, size, memory_order_release);
+ atomic_store(&min_size_, size / 10 * 9,
+ memory_order_release); // 90% of max size.
max_cache_size_ = cache_size;
}
+ uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
+
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
if (c->Size() > max_cache_size_)
@@ -65,15 +68,15 @@
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
- if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
+ if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
Recycle(cb);
}
private:
// Read-only data.
char pad0_[kCacheLineSize];
- uptr max_size_;
- uptr min_size_;
+ atomic_uintptr_t max_size_;
+ atomic_uintptr_t min_size_;
uptr max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
@@ -83,9 +86,10 @@
void NOINLINE Recycle(Callback cb) {
Cache tmp;
+ uptr min_size = atomic_load(&min_size_, memory_order_acquire);
{
SpinMutexLock l(&cache_mutex_);
- while (cache_.Size() > min_size_) {
+ while (cache_.Size() > min_size) {
QuarantineBatch *b = cache_.DequeueBatch();
tmp.EnqueueBatch(b);
}
@@ -130,6 +134,7 @@
size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
}
QuarantineBatch *b = list_.back();
+ CHECK(b);
b->batch[b->count++] = ptr;
b->size += size;
SizeAdd(size);
@@ -168,6 +173,7 @@
NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
b->count = 0;
b->size = 0;
list_.push_back(b);
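For reference, the effect of the atomic max/min sizes introduced above, with an assumed quarantine of 64 MB:

  // Init(64 << 20, cache_size):
  //   max_size_ = 67108864
  //   min_size_ = 67108864 / 10 * 9 = 60397974      // ~90% of max
  // A Drain() that pushes the global cache past max_size_ triggers Recycle(),
  // which dequeues batches until the cache drops below min_size_.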
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
index f10f1f9..59b53f4 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.cc
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -22,7 +22,8 @@
StackDepotNode *link;
u32 id;
atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
- uptr size;
+ u32 size;
+ u32 tag;
uptr stack[1]; // [size]
static const u32 kTabSizeLog = 20;
@@ -37,7 +38,8 @@
bool eq(u32 hash, const args_type &args) const {
u32 hash_bits =
atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
- if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+ if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
+ return false;
uptr i = 0;
for (; i < size; i++) {
if (stack[i] != args.trace[i]) return false;
@@ -72,10 +74,11 @@
void store(const args_type &args, u32 hash) {
atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
size = args.size;
+ tag = args.tag;
internal_memcpy(stack, args.trace, size * sizeof(uptr));
}
args_type load() const {
- return args_type(&stack[0], size);
+ return args_type(&stack[0], size, tag);
}
StackDepotHandle get_handle() { return StackDepotHandle(this); }
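With the new tag field, otherwise equal traces stored with different tags get distinct depot entries. A hedged sketch of how a tool might tag allocation traces (assumes the StackDepotPut(StackTrace) entry point and unwind inputs gathered elsewhere):

  BufferedStackTrace stack;
  stack.Unwind(kStackTraceMax, pc, bp, /*context=*/nullptr, stack_top,
               stack_bottom, /*request_fast_unwind=*/true);
  u32 id = StackDepotPut(
      StackTrace(stack.trace, stack.size, StackTrace::TAG_ALLOC));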
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.cc b/lib/sanitizer_common/sanitizer_stacktrace.cc
index cf061fb..2deadb6 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -17,21 +17,6 @@
namespace __sanitizer {
-uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#if defined(__arm__)
- // Cancel Thumb bit.
- pc = pc & (~1);
-#endif
-#if defined(__powerpc__) || defined(__powerpc64__)
- // PCs are always 4 byte aligned.
- return pc - 4;
-#elif defined(__sparc__) || defined(__mips__)
- return pc - 8;
-#else
- return pc - 1;
-#endif
-}
-
uptr StackTrace::GetNextInstructionPc(uptr pc) {
#if defined(__mips__)
return pc + 8;
@@ -83,7 +68,7 @@
}
void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
- uptr stack_bottom, uptr max_depth) {
+ uptr stack_bottom, u32 max_depth) {
CHECK_GE(max_depth, 2);
trace_buffer[0] = pc;
size = 1;
@@ -120,7 +105,7 @@
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 288;
+ const int kPcThreshold = 304;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
diff --git a/lib/sanitizer_common/sanitizer_stacktrace.h b/lib/sanitizer_common/sanitizer_stacktrace.h
index e755c05..6c3a151 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -17,7 +17,7 @@
namespace __sanitizer {
-static const uptr kStackTraceMax = 256;
+static const u32 kStackTraceMax = 256;
#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__powerpc__) || \
defined(__powerpc64__) || defined(__sparc__) || \
@@ -40,10 +40,18 @@
struct StackTrace {
const uptr *trace;
- uptr size;
+ u32 size;
+ u32 tag;
- StackTrace() : trace(nullptr), size(0) {}
- StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {}
+ static const int TAG_UNKNOWN = 0;
+ static const int TAG_ALLOC = 1;
+ static const int TAG_DEALLOC = 2;
+ static const int TAG_CUSTOM = 100; // Tool specific tags start here.
+
+ StackTrace() : trace(nullptr), size(0), tag(0) {}
+ StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
+ StackTrace(const uptr *trace, u32 size, u32 tag)
+ : trace(trace), size(size), tag(tag) {}
// Prints a symbolized stacktrace, followed by an empty line.
void Print() const;
@@ -57,12 +65,29 @@
}
static uptr GetCurrentPc();
- static uptr GetPreviousInstructionPc(uptr pc);
+ static inline uptr GetPreviousInstructionPc(uptr pc);
static uptr GetNextInstructionPc(uptr pc);
typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
int out_size);
};
+// Performance-critical, must be in the header.
+ALWAYS_INLINE
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#if defined(__arm__)
+ // Cancel Thumb bit.
+ pc = pc & (~1);
+#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__) || defined(__mips__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
// StackTrace that owns the buffer used to store the addresses.
struct BufferedStackTrace : public StackTrace {
uptr trace_buffer[kStackTraceMax];
@@ -71,15 +96,15 @@
BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
- void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+ void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
- uptr max_depth);
- void SlowUnwindStack(uptr pc, uptr max_depth);
+ u32 max_depth);
+ void SlowUnwindStack(uptr pc, u32 max_depth);
void SlowUnwindStackWithContext(uptr pc, void *context,
- uptr max_depth);
+ u32 max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
diff --git a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index 13fb01f..0f98c7d 100644
--- a/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -24,36 +24,27 @@
Printf(" <empty stack>\n\n");
return;
}
- const int kMaxAddrFrames = 64;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++)
- new(&addr_frames[i]) AddressInfo();
InternalScopedString frame_desc(GetPageSizeCached() * 2);
uptr frame_num = 0;
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
- uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
- pc, addr_frames.data(), kMaxAddrFrames);
- if (addr_frames_num == 0) {
- addr_frames[0].address = pc;
- addr_frames_num = 1;
- }
- for (uptr j = 0; j < addr_frames_num; j++) {
- AddressInfo &info = addr_frames[j];
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ CHECK(frames);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
frame_desc.clear();
RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
- info, common_flags()->strip_path_prefix);
+ cur->info, common_flags()->strip_path_prefix);
Printf("%s\n", frame_desc.data());
- info.Clear();
}
+ frames->ClearAll();
}
// Always print a trailing empty line after stack trace.
Printf("\n");
}
-void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context,
+void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
uptr stack_top, uptr stack_bottom,
bool request_fast_unwind) {
top_frame_bp = (max_depth > 0) ? bp : 0;
diff --git a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index d20b524..ad20e39 100644
--- a/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -14,7 +14,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__))
#include "sanitizer_stoptheworld.h"
@@ -89,36 +89,50 @@
bool SuspendThread(SuspendedThreadID thread_id);
};
-bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
+bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) {
// Are we already attached to this thread?
// Currently this check takes linear time, however the number of threads is
// usually small.
- if (suspended_threads_list_.Contains(thread_id))
+ if (suspended_threads_list_.Contains(tid))
return false;
int pterrno;
- if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL),
+ if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, NULL, NULL),
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
- VReport(1, "Could not attach to thread %d (errno %d).\n", thread_id,
- pterrno);
+ VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno);
return false;
} else {
- VReport(1, "Attached to thread %d.\n", thread_id);
+ VReport(1, "Attached to thread %d.\n", tid);
// The thread is not guaranteed to stop before ptrace returns, so we must
- // wait on it.
- uptr waitpid_status;
- HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL));
- int wperrno;
- if (internal_iserror(waitpid_status, &wperrno)) {
- // Got a ECHILD error. I don't think this situation is possible, but it
- // doesn't hurt to report it.
- VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
- thread_id, wperrno);
- internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
- return false;
+ // wait on it. Note: if the thread receives a signal concurrently,
+ // we can get a notification about the signal before the notification about
+ // the stop. In such a case we need to forward the signal to the thread;
+ // otherwise the signal will be missed (as we do PTRACE_DETACH with arg=0)
+ // and any logic relying on signals will break. After forwarding we need to
+ // keep waiting for the stop, because the thread is not stopped yet.
+ // We ignore the delivery of SIGSTOP, because we want to make stop-the-world
+ // as invisible as possible.
+ for (;;) {
+ int status;
+ uptr waitpid_status;
+ HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
+ int wperrno;
+ if (internal_iserror(waitpid_status, &wperrno)) {
+ // Got an ECHILD error. I don't think this situation is possible, but it
+ // doesn't hurt to report it.
+ VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
+ tid, wperrno);
+ internal_ptrace(PTRACE_DETACH, tid, NULL, NULL);
+ return false;
+ }
+ if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
+ internal_ptrace(PTRACE_CONT, tid, 0, (void*)(uptr)WSTOPSIG(status));
+ continue;
+ }
+ break;
}
- suspended_threads_list_.Append(thread_id);
+ suspended_threads_list_.Append(tid);
return true;
}
}
@@ -170,10 +184,9 @@
// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = NULL;
-// Signals that should not be blocked (this is used in the parent thread as well
-// as the tracer thread).
-static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
- SIGBUS, SIGXCPU, SIGXFSZ };
+// Synchronous signals that should not be blocked.
+static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
+ SIGXCPU, SIGXFSZ };
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
@@ -188,7 +201,7 @@
static DieCallbackType old_die_callback;
// Signal handler to wake up suspended threads when the tracer thread dies.
-void TracerThreadSignalHandler(int signum, void *siginfo, void *) {
+static void TracerThreadSignalHandler(int signum, void *siginfo, void *) {
if (thread_suspender_instance != NULL) {
if (signum == SIGABRT)
thread_suspender_instance->KillAllThreads();
@@ -228,6 +241,7 @@
tracer_thread_argument->mutex.Lock();
tracer_thread_argument->mutex.Unlock();
+ old_die_callback = GetDieCallback();
SetDieCallback(TracerThreadDieCallback);
ThreadSuspender thread_suspender(internal_getppid());
@@ -242,17 +256,14 @@
handler_stack.ss_size = kHandlerStackSize;
internal_sigaltstack(&handler_stack, NULL);
- // Install our handler for fatal signals. Other signals should be blocked by
- // the mask we inherited from the caller thread.
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- __sanitizer_sigaction new_sigaction;
- internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
- new_sigaction.sigaction = TracerThreadSignalHandler;
- new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
- internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction_norestorer(kUnblockedSignals[signal_index],
- &new_sigaction, NULL);
+ // Install our handler for synchronous signals. Other signals should be
+ // blocked by the mask we inherited from the parent thread.
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
+ __sanitizer_sigaction act;
+ internal_memset(&act, 0, sizeof(act));
+ act.sigaction = TracerThreadSignalHandler;
+ act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
}
int exit_code = 0;
@@ -265,9 +276,11 @@
thread_suspender.ResumeAllThreads();
exit_code = 0;
}
+ // Note: this is a bad race. If TracerThreadDieCallback has already started
+ // in another thread and observed that thread_suspender_instance != 0,
+ // it can call KillAllThreads on the destroyed variable.
+ SetDieCallback(old_die_callback);
thread_suspender_instance = NULL;
- handler_stack.ss_flags = SS_DISABLE;
- internal_sigaltstack(&handler_stack, NULL);
return exit_code;
}
@@ -299,53 +312,21 @@
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
-static __sanitizer_sigaction old_sigactions
- [ARRAY_SIZE(kUnblockedSignals)];
class StopTheWorldScope {
public:
StopTheWorldScope() {
- // Block all signals that can be blocked safely, and install
- // default handlers for the remaining signals.
- // We cannot allow user-defined handlers to run while the ThreadSuspender
- // thread is active, because they could conceivably call some libc functions
- // which modify errno (which is shared between the two threads).
- internal_sigfillset(&blocked_sigset);
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- // Remove the signal from the set of blocked signals.
- internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
- // Install the default handler.
- __sanitizer_sigaction new_sigaction;
- internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
- new_sigaction.handler = SIG_DFL;
- internal_sigfillset(&new_sigaction.sa_mask);
- internal_sigaction_norestorer(kUnblockedSignals[signal_index],
- &new_sigaction, &old_sigactions[signal_index]);
- }
- int sigprocmask_status =
- internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
- CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
// Make this process dumpable. Processes that are not dumpable cannot be
// attached to.
process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
if (!process_was_dumpable_)
internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
- old_die_callback = GetDieCallback();
}
~StopTheWorldScope() {
- SetDieCallback(old_die_callback);
// Restore the dumpable flag.
if (!process_was_dumpable_)
internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
- // Restore the signal handlers.
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
- signal_index++) {
- internal_sigaction_norestorer(kUnblockedSignals[signal_index],
- &old_sigactions[signal_index], NULL);
- }
- internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
private:
@@ -378,11 +359,36 @@
// Block the execution of TracerThread until after we have set ptrace
// permissions.
tracer_thread_argument.mutex.Lock();
+ // Signal handling story.
+ // We don't want async signals to be delivered to the tracer thread,
+ // so we block all async signals before creating the thread. An async signal
+ // handler can temporarily modify errno, which is shared with this thread.
+ // We ought to use pthread_sigmask here, because sigprocmask has undefined
+ // behavior in multithreaded programs. However, on Linux sigprocmask is
+ // equivalent to pthread_sigmask with the exception that pthread_sigmask
+ // does not allow blocking some signals used internally in the pthread
+ // implementation. We are fine with blocking them here, as we are not
+ // going to pthread_cancel the thread.
+ // The tracer thread should not raise any synchronous signals. But in case it
+ // does, we set up a special handler for sync signals that properly kills the
+ // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
+ // in the tracer thread won't interfere with the user program. Double note:
+ // if a user does something along the lines of 'kill -11 pid', that can kill
+ // the process even if the user has set up their own handler for SEGV.
+ // Thing to watch out for: this code should not change the behavior of user
+ // code in any observable way. In particular, it should not override user
+ // signal handlers.
+ internal_sigfillset(&blocked_sigset);
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
+ internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
+ int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+ CHECK_EQ(rv, 0);
uptr tracer_pid = internal_clone(
TracerThread, tracer_stack.Bottom(),
CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
&tracer_thread_argument, 0 /* parent_tidptr */, 0 /* newtls */, 0
/* child_tidptr */);
+ internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
int local_errno = 0;
if (internal_iserror(tracer_pid, &local_errno)) {
VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
@@ -459,4 +465,4 @@
}
} // namespace __sanitizer
-#endif // SANITIZER_LINUX && defined(__x86_64__)
+#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__))
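For reference, a minimal sketch of the mask-inheritance pattern the "signal handling story" comment above describes, written with plain POSIX calls; pthread_create stands in for internal_clone purely for illustration, and all names here are assumptions, not part of this change.

#include <pthread.h>
#include <signal.h>
#include <stddef.h>

static const int kSyncSignalsExample[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
                                           SIGBUS, SIGXCPU, SIGXFSZ };

static void *TracerBodyExample(void *arg) {
  // Runs with every asynchronous signal blocked (inherited mask), so a user
  // signal handler cannot run here and clobber the shared errno.
  return arg;
}

void SpawnTracerWithAsyncSignalsBlocked() {
  sigset_t blocked, old;
  sigfillset(&blocked);                            // block everything...
  for (size_t i = 0; i < sizeof(kSyncSignalsExample) / sizeof(int); i++)
    sigdelset(&blocked, kSyncSignalsExample[i]);   // ...except sync signals
  pthread_sigmask(SIG_BLOCK, &blocked, &old);      // new thread inherits this
  pthread_t tracer;
  pthread_create(&tracer, nullptr, TracerBodyExample, nullptr);
  pthread_sigmask(SIG_SETMASK, &old, nullptr);     // restore the caller's mask
  pthread_join(tracer, nullptr);
}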
diff --git a/lib/sanitizer_common/sanitizer_suppressions.cc b/lib/sanitizer_common/sanitizer_suppressions.cc
index 7f76693..2b697e9 100644
--- a/lib/sanitizer_common/sanitizer_suppressions.cc
+++ b/lib/sanitizer_common/sanitizer_suppressions.cc
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Suppression parsing/matching code shared between TSan and LSan.
+// Suppression parsing/matching code.
//
//===----------------------------------------------------------------------===//
@@ -21,90 +21,43 @@
namespace __sanitizer {
-static const char *const kTypeStrings[SuppressionTypeCount] = {
- "none", "race", "mutex", "thread", "signal",
- "leak", "called_from_lib", "deadlock", "vptr_check"};
-
-bool TemplateMatch(char *templ, const char *str) {
- if (str == 0 || str[0] == 0)
- return false;
- bool start = false;
- if (templ && templ[0] == '^') {
- start = true;
- templ++;
- }
- bool asterisk = false;
- while (templ && templ[0]) {
- if (templ[0] == '*') {
- templ++;
- start = false;
- asterisk = true;
- continue;
- }
- if (templ[0] == '$')
- return str[0] == 0 || asterisk;
- if (str[0] == 0)
- return false;
- char *tpos = (char*)internal_strchr(templ, '*');
- char *tpos1 = (char*)internal_strchr(templ, '$');
- if (tpos == 0 || (tpos1 && tpos1 < tpos))
- tpos = tpos1;
- if (tpos != 0)
- tpos[0] = 0;
- const char *str0 = str;
- const char *spos = internal_strstr(str, templ);
- str = spos + internal_strlen(templ);
- templ = tpos;
- if (tpos)
- tpos[0] = tpos == tpos1 ? '$' : '*';
- if (spos == 0)
- return false;
- if (start && spos != str0)
- return false;
- start = false;
- asterisk = false;
- }
- return true;
+SuppressionContext::SuppressionContext(const char *suppression_types[],
+ int suppression_types_num)
+ : suppression_types_(suppression_types),
+ suppression_types_num_(suppression_types_num), suppressions_(1),
+ can_parse_(true) {
+ CHECK_LE(suppression_types_num_, kMaxSuppressionTypes);
+ internal_memset(has_suppression_type_, 0, suppression_types_num_);
}
-ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
-static SuppressionContext *suppression_ctx = 0;
-
-SuppressionContext *SuppressionContext::Get() {
- CHECK(suppression_ctx);
- return suppression_ctx;
-}
-
-void SuppressionContext::InitIfNecessary() {
- if (suppression_ctx)
+void SuppressionContext::ParseFromFile(const char *filename) {
+ if (filename[0] == '\0')
return;
- suppression_ctx = new(placeholder) SuppressionContext;
- if (common_flags()->suppressions[0] == '\0')
- return;
- char *suppressions_from_file;
+ char *file_contents;
uptr buffer_size;
- uptr contents_size =
- ReadFileToBuffer(common_flags()->suppressions, &suppressions_from_file,
- &buffer_size, 1 << 26 /* max_len */);
+ uptr contents_size = ReadFileToBuffer(filename, &file_contents, &buffer_size,
+ 1 << 26 /* max_len */);
if (contents_size == 0) {
Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
- common_flags()->suppressions);
+ filename);
Die();
}
- suppression_ctx->Parse(suppressions_from_file);
+ Parse(file_contents);
}
-bool SuppressionContext::Match(const char *str, SuppressionType type,
+bool SuppressionContext::Match(const char *str, const char *type,
Suppression **s) {
can_parse_ = false;
- uptr i;
- for (i = 0; i < suppressions_.size(); i++)
- if (type == suppressions_[i].type &&
- TemplateMatch(suppressions_[i].templ, str))
- break;
- if (i == suppressions_.size()) return false;
- *s = &suppressions_[i];
- return true;
+ if (!HasSuppressionType(type))
+ return false;
+ for (uptr i = 0; i < suppressions_.size(); i++) {
+ Suppression &cur = suppressions_[i];
+ if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) {
+ *s = &cur;
+ return true;
+ }
+ }
+ return false;
}
static const char *StripPrefix(const char *str, const char *prefix) {
@@ -132,25 +85,26 @@
while (line != end2 && (end2[-1] == ' ' || end2[-1] == '\t'))
end2--;
int type;
- for (type = 0; type < SuppressionTypeCount; type++) {
- const char *next_char = StripPrefix(line, kTypeStrings[type]);
+ for (type = 0; type < suppression_types_num_; type++) {
+ const char *next_char = StripPrefix(line, suppression_types_[type]);
if (next_char && *next_char == ':') {
line = ++next_char;
break;
}
}
- if (type == SuppressionTypeCount) {
+ if (type == suppression_types_num_) {
Printf("%s: failed to parse suppressions\n", SanitizerToolName);
Die();
}
Suppression s;
- s.type = static_cast<SuppressionType>(type);
+ s.type = suppression_types_[type];
s.templ = (char*)InternalAlloc(end2 - line + 1);
internal_memcpy(s.templ, line, end2 - line);
s.templ[end2 - line] = 0;
s.hit_count = 0;
s.weight = 0;
suppressions_.push_back(s);
+ has_suppression_type_[type] = true;
}
if (end[0] == 0)
break;
@@ -162,6 +116,14 @@
return suppressions_.size();
}
+bool SuppressionContext::HasSuppressionType(const char *type) const {
+ for (int i = 0; i < suppression_types_num_; i++) {
+ if (0 == internal_strcmp(type, suppression_types_[i]))
+ return has_suppression_type_[i];
+ }
+ return false;
+}
+
const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
CHECK_LT(i, suppressions_.size());
return &suppressions_[i];
@@ -174,9 +136,4 @@
matched->push_back(&suppressions_[i]);
}
-const char *SuppressionTypeString(SuppressionType t) {
- CHECK(t < SuppressionTypeCount);
- return kTypeStrings[t];
-}
-
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_suppressions.h b/lib/sanitizer_common/sanitizer_suppressions.h
index 37fd3c4..02dbf6f 100644
--- a/lib/sanitizer_common/sanitizer_suppressions.h
+++ b/lib/sanitizer_common/sanitizer_suppressions.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Suppression parsing/matching code shared between TSan and LSan.
+// Suppression parsing/matching code.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SUPPRESSIONS_H
@@ -18,21 +18,8 @@
namespace __sanitizer {
-enum SuppressionType {
- SuppressionNone,
- SuppressionRace,
- SuppressionMutex,
- SuppressionThread,
- SuppressionSignal,
- SuppressionLeak,
- SuppressionLib,
- SuppressionDeadlock,
- SuppressionVptrCheck,
- SuppressionTypeCount
-};
-
struct Suppression {
- SuppressionType type;
+ const char *type;
char *templ;
unsigned hit_count;
uptr weight;
@@ -40,31 +27,29 @@
class SuppressionContext {
public:
+ // Create new SuppressionContext capable of parsing given suppression types.
+ SuppressionContext(const char *suppression_types[],
+ int suppression_types_num);
+
+ void ParseFromFile(const char *filename);
void Parse(const char *str);
- bool Match(const char* str, SuppressionType type, Suppression **s);
+
+ bool Match(const char *str, const char *type, Suppression **s);
uptr SuppressionCount() const;
+ bool HasSuppressionType(const char *type) const;
const Suppression *SuppressionAt(uptr i) const;
void GetMatched(InternalMmapVector<Suppression *> *matched);
- // Create a SuppressionContext singleton if it hasn't been created earlier.
- // Not thread safe. Must be called early during initialization (but after
- // runtime flags are parsed).
- static void InitIfNecessary();
- // Returns a SuppressionContext singleton.
- static SuppressionContext *Get();
-
private:
- SuppressionContext() : suppressions_(1), can_parse_(true) {}
+ static const int kMaxSuppressionTypes = 16;
+ const char **const suppression_types_;
+ const int suppression_types_num_;
+
InternalMmapVector<Suppression> suppressions_;
+ bool has_suppression_type_[kMaxSuppressionTypes];
bool can_parse_;
-
- friend class SuppressionContextTest;
};
-const char *SuppressionTypeString(SuppressionType t);
-
-bool TemplateMatch(char *templ, const char *str);
-
} // namespace __sanitizer
#endif // SANITIZER_SUPPRESSIONS_H
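A hypothetical consumer of the reworked SuppressionContext API declared above: each tool now supplies its own list of suppression type strings instead of relying on the old shared enum. The "my_check" type name and the surrounding helper are assumptions made for illustration only.

static const char *kMyToolSuppressionTypes[] = {"my_check"};

void MaybeSuppressReport(const char *symbol_name, bool *suppressed) {
  // A function-static context keeps the sketch short; the real runtimes
  // placement-new their contexts to avoid global constructors.
  static SuppressionContext ctx(kMyToolSuppressionTypes,
                                ARRAY_SIZE(kMyToolSuppressionTypes));
  static bool parsed = false;
  if (!parsed) {
    ctx.ParseFromFile(common_flags()->suppressions);  // no-op for ""
    parsed = true;
  }
  Suppression *s = nullptr;
  if (ctx.Match(symbol_name, "my_check", &s)) {
    s->hit_count++;  // remember the hit for the final suppression report
    *suppressed = true;
    return;
  }
  *suppressed = false;
}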
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.cc b/lib/sanitizer_common/sanitizer_symbolizer.cc
index 8aa9de0..135720e 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer.cc
@@ -11,13 +11,61 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator_internal.h"
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
+AddressInfo::AddressInfo() {
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
+
+void AddressInfo::Clear() {
+ InternalFree(module);
+ InternalFree(function);
+ InternalFree(file);
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
+
+void AddressInfo::FillAddressAndModuleInfo(uptr addr, const char *mod_name,
+ uptr mod_offset) {
+ address = addr;
+ module = internal_strdup(mod_name);
+ module_offset = mod_offset;
+}
+
+SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
+
+SymbolizedStack *SymbolizedStack::New(uptr addr) {
+ void *mem = InternalAlloc(sizeof(SymbolizedStack));
+ SymbolizedStack *res = new(mem) SymbolizedStack();
+ res->info.address = addr;
+ return res;
+}
+
+void SymbolizedStack::ClearAll() {
+ info.Clear();
+ if (next)
+ next->ClearAll();
+ InternalFree(this);
+}
+
+DataInfo::DataInfo() {
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
+void DataInfo::Clear() {
+ InternalFree(module);
+ InternalFree(name);
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
Symbolizer *Symbolizer::symbolizer_;
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
diff --git a/lib/sanitizer_common/sanitizer_symbolizer.h b/lib/sanitizer_common/sanitizer_symbolizer.h
index 82093e4..3a80774 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -19,13 +19,14 @@
#ifndef SANITIZER_SYMBOLIZER_H
#define SANITIZER_SYMBOLIZER_H
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
struct AddressInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using the sanitizer internal allocator.
uptr address;
char *module;
@@ -39,45 +40,38 @@
int line;
int column;
- AddressInfo() {
- internal_memset(this, 0, sizeof(AddressInfo));
- function_offset = kUnknown;
- }
-
+ AddressInfo();
// Deletes all strings and resets all fields.
- void Clear() {
- InternalFree(module);
- InternalFree(function);
- InternalFree(file);
- internal_memset(this, 0, sizeof(AddressInfo));
- function_offset = kUnknown;
- }
-
+ void Clear();
void FillAddressAndModuleInfo(uptr addr, const char *mod_name,
- uptr mod_offset) {
- address = addr;
- module = internal_strdup(mod_name);
- module_offset = mod_offset;
- }
+ uptr mod_offset);
+};
+
+// Linked list of symbolized frames (each frame is described by AddressInfo).
+struct SymbolizedStack {
+ SymbolizedStack *next;
+ AddressInfo info;
+ static SymbolizedStack *New(uptr addr);
+ // Deletes current, and all subsequent frames in the linked list.
+ // The object cannot be accessed after the call to this function.
+ void ClearAll();
+
+ private:
+ SymbolizedStack();
};
// For now, DataInfo is used to describe global variable.
struct DataInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
char *module;
uptr module_offset;
char *name;
uptr start;
uptr size;
- DataInfo() {
- internal_memset(this, 0, sizeof(DataInfo));
- }
-
- void Clear() {
- InternalFree(module);
- InternalFree(name);
- internal_memset(this, 0, sizeof(DataInfo));
- }
+ DataInfo();
+ void Clear();
};
class Symbolizer {
@@ -85,11 +79,10 @@
/// Initialize and return platform-specific implementation of symbolizer
/// (if it wasn't already initialized).
static Symbolizer *GetOrInit();
- // Fills at most "max_frames" elements of "frames" with descriptions
- // for a given address (in all inlined functions). Returns the number
- // of descriptions actually filled.
- virtual uptr SymbolizePC(uptr address, AddressInfo *frames, uptr max_frames) {
- return 0;
+ // Returns a list of symbolized frames for a given address (containing
+ // all inlined functions, if necessary).
+ virtual SymbolizedStack *SymbolizePC(uptr address) {
+ return SymbolizedStack::New(address);
}
virtual bool SymbolizeData(uptr address, DataInfo *info) {
return false;
@@ -144,6 +137,61 @@
};
};
+class ExternalSymbolizerInterface {
+ public:
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+ virtual char *SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) {
+ UNIMPLEMENTED();
+ }
+};
+
+// SymbolizerProcess encapsulates communication between the tool and
+// external symbolizer program, running in a different subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess : public ExternalSymbolizerInterface {
+ public:
+ explicit SymbolizerProcess(const char *path);
+ char *SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) override;
+
+ private:
+ bool Restart();
+ char *SendCommandImpl(bool is_data, const char *module_name,
+ uptr module_offset);
+ bool ReadFromSymbolizer(char *buffer, uptr max_length);
+ bool WriteToSymbolizer(const char *buffer, uptr length);
+ bool StartSymbolizerSubprocess();
+
+ virtual bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
+ const char *module_name,
+ uptr module_offset) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual void ExecuteWithDefaultArgs(const char *path_to_binary) const {
+ UNIMPLEMENTED();
+ }
+
+ const char *path_;
+ int input_fd_;
+ int output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
+ uptr times_restarted_;
+ bool failed_to_start_;
+ bool reported_invalid_path_;
+};
+
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_H
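A minimal sketch of how callers consume the new SymbolizePC() contract declared above: the result is a non-null linked list of frames owned by the caller and released with ClearAll(). The printing helper itself is hypothetical.

void PrintSymbolizedPc(uptr pc) {
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
  CHECK(frames);  // at least one frame carrying the address is always returned
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    const AddressInfo &info = cur->info;
    Printf("  %p in %s %s:%d\n", (void *)info.address,
           info.function ? info.function : "<unknown>",
           info.file ? info.file : "<unknown>", info.line);
  }
  frames->ClearAll();  // frees every frame and the strings it owns
}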
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc b/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
index 4ec6f0a..9317a78 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
@@ -83,44 +83,52 @@
}
# endif // SANITIZER_CP_DEMANGLE
-struct SymbolizeCodeData {
- AddressInfo *frames;
- uptr n_frames;
- uptr max_frames;
+struct SymbolizeCodeCallbackArg {
+ SymbolizedStack *first;
+ SymbolizedStack *last;
const char *module_name;
uptr module_offset;
+
+ void append(SymbolizedStack *f) {
+ if (last != nullptr) {
+ last->next = f;
+ last = f;
+ } else {
+ first = f;
+ last = f;
+ }
+ }
};
extern "C" {
static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
const char *filename, int lineno,
const char *function) {
- SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
if (function) {
- AddressInfo *info = &cdata->frames[cdata->n_frames++];
- info->Clear();
+ SymbolizedStack *cur = SymbolizedStack::New(addr);
+ cdata->append(cur);
+ AddressInfo *info = &cur->info;
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
info->function = LibbacktraceSymbolizer::Demangle(function, true);
if (filename)
info->file = internal_strdup(filename);
info->line = lineno;
- if (cdata->n_frames == cdata->max_frames)
- return 1;
}
return 0;
}
static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
const char *symname, uintptr_t, uintptr_t) {
- SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata;
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
if (symname) {
- AddressInfo *info = &cdata->frames[0];
- info->Clear();
+ SymbolizedStack *cur = SymbolizedStack::New(addr);
+ cdata->append(cur);
+ AddressInfo *info = &cur->info;
info->FillAddressAndModuleInfo(addr, cdata->module_name,
cdata->module_offset);
info->function = LibbacktraceSymbolizer::Demangle(symname, true);
- cdata->n_frames = 1;
}
}
@@ -148,28 +156,26 @@
return new(*alloc) LibbacktraceSymbolizer(state);
}
-uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
- uptr max_frames,
- const char *module_name,
- uptr module_offset) {
- SymbolizeCodeData data;
- data.frames = frames;
- data.n_frames = 0;
- data.max_frames = max_frames;
+SymbolizedStack *LibbacktraceSymbolizer::SymbolizeCode(uptr addr,
+ const char *module_name,
+ uptr module_offset) {
+ SymbolizeCodeCallbackArg data;
+ data.first = nullptr;
+ data.last = nullptr;
data.module_name = module_name;
data.module_offset = module_offset;
backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,
ErrorCallback, &data);
- if (data.n_frames)
- return data.n_frames;
+ if (data.first)
+ return data.first;
backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,
ErrorCallback, &data);
- return data.n_frames;
+ return data.first;
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
- backtrace_syminfo((backtrace_state *)state_, info->address,
- SymbolizeDataCallback, ErrorCallback, info);
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,
+ ErrorCallback, info);
return true;
}
@@ -179,15 +185,14 @@
return 0;
}
-uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames,
- uptr max_frames,
- const char *module_name,
- uptr module_offset) {
+SymbolizedStack *LibbacktraceSymbolizer::SymbolizeCode(uptr addr,
+ const char *module_name,
+ uptr module_offset) {
(void)state_;
- return 0;
+ return nullptr;
}
-bool LibbacktraceSymbolizer::SymbolizeData(DataInfo *info) {
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return false;
}
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
index 6c536cc..1ff0050 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
+++ b/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -32,10 +32,10 @@
public:
static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);
- uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames,
- const char *module_name, uptr module_offset);
+ SymbolizedStack *SymbolizeCode(uptr addr, const char *module_name,
+ uptr module_offset);
- bool SymbolizeData(DataInfo *info);
+ bool SymbolizeData(uptr addr, DataInfo *info);
// May return NULL if demangling failed.
static char *Demangle(const char *name, bool always_alloc = false);
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index eb2b707..deb3429 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -94,219 +94,239 @@
return ret;
}
-class ExternalSymbolizerInterface {
- public:
- // Can't declare pure virtual functions in sanitizer runtimes:
- // __cxa_pure_virtual might be unavailable.
- virtual char *SendCommand(bool is_data, const char *module_name,
- uptr module_offset) {
- UNIMPLEMENTED();
- }
-};
+SymbolizerProcess::SymbolizerProcess(const char *path)
+ : path_(path),
+ input_fd_(kInvalidFd),
+ output_fd_(kInvalidFd),
+ times_restarted_(0),
+ failed_to_start_(false),
+ reported_invalid_path_(false) {
+ CHECK(path_);
+ CHECK_NE(path_[0], '\0');
+}
-// SymbolizerProcess encapsulates communication between the tool and
-// external symbolizer program, running in a different subprocess.
-// SymbolizerProcess may not be used from two threads simultaneously.
-class SymbolizerProcess : public ExternalSymbolizerInterface {
- public:
- explicit SymbolizerProcess(const char *path)
- : path_(path),
- input_fd_(kInvalidFd),
- output_fd_(kInvalidFd),
- times_restarted_(0),
- failed_to_start_(false),
- reported_invalid_path_(false) {
- CHECK(path_);
- CHECK_NE(path_[0], '\0');
+char *SymbolizerProcess::SendCommand(bool is_data, const char *module_name,
+ uptr module_offset) {
+ for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
+ // Start or restart symbolizer if we failed to send command to it.
+ if (char *res = SendCommandImpl(is_data, module_name, module_offset))
+ return res;
+ Restart();
}
-
- char *SendCommand(bool is_data, const char *module_name, uptr module_offset) {
- for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
- // Start or restart symbolizer if we failed to send command to it.
- if (char *res = SendCommandImpl(is_data, module_name, module_offset))
- return res;
- Restart();
- }
- if (!failed_to_start_) {
- Report("WARNING: Failed to use and restart external symbolizer!\n");
- failed_to_start_ = true;
- }
- return 0;
+ if (!failed_to_start_) {
+ Report("WARNING: Failed to use and restart external symbolizer!\n");
+ failed_to_start_ = true;
}
+ return 0;
+}
- private:
- bool Restart() {
- if (input_fd_ != kInvalidFd)
- internal_close(input_fd_);
- if (output_fd_ != kInvalidFd)
- internal_close(output_fd_);
- return StartSymbolizerSubprocess();
- }
+bool SymbolizerProcess::Restart() {
+ if (input_fd_ != kInvalidFd)
+ internal_close(input_fd_);
+ if (output_fd_ != kInvalidFd)
+ internal_close(output_fd_);
+ return StartSymbolizerSubprocess();
+}
- char *SendCommandImpl(bool is_data, const char *module_name,
- uptr module_offset) {
- if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
+char *SymbolizerProcess::SendCommandImpl(bool is_data, const char *module_name,
+ uptr module_offset) {
+ if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
return 0;
- CHECK(module_name);
- if (!RenderInputCommand(buffer_, kBufferSize, is_data, module_name,
- module_offset))
+ CHECK(module_name);
+ if (!RenderInputCommand(buffer_, kBufferSize, is_data, module_name,
+ module_offset))
return 0;
- if (!writeToSymbolizer(buffer_, internal_strlen(buffer_)))
+ if (!WriteToSymbolizer(buffer_, internal_strlen(buffer_)))
return 0;
- if (!readFromSymbolizer(buffer_, kBufferSize))
+ if (!ReadFromSymbolizer(buffer_, kBufferSize))
return 0;
- return buffer_;
- }
+ return buffer_;
+}
- bool readFromSymbolizer(char *buffer, uptr max_length) {
- if (max_length == 0)
- return true;
- uptr read_len = 0;
- while (true) {
- uptr just_read = internal_read(input_fd_, buffer + read_len,
- max_length - read_len - 1);
- // We can't read 0 bytes, as we don't expect external symbolizer to close
- // its stdout.
- if (just_read == 0 || just_read == (uptr)-1) {
- Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
- return false;
- }
- read_len += just_read;
- if (ReachedEndOfOutput(buffer, read_len))
- break;
- }
- buffer[read_len] = '\0';
+bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
+ if (max_length == 0)
return true;
- }
-
- bool writeToSymbolizer(const char *buffer, uptr length) {
- if (length == 0)
- return true;
- uptr write_len = internal_write(output_fd_, buffer, length);
- if (write_len == 0 || write_len == (uptr)-1) {
- Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ uptr read_len = 0;
+ while (true) {
+ uptr just_read = internal_read(input_fd_, buffer + read_len,
+ max_length - read_len - 1);
+ // We can't read 0 bytes, as we don't expect external symbolizer to close
+ // its stdout.
+ if (just_read == 0 || just_read == (uptr)-1) {
+ Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
return false;
}
+ read_len += just_read;
+ if (ReachedEndOfOutput(buffer, read_len))
+ break;
+ }
+ buffer[read_len] = '\0';
+ return true;
+}
+
+bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
+ if (length == 0)
return true;
+ uptr write_len = internal_write(output_fd_, buffer, length);
+ if (write_len == 0 || write_len == (uptr)-1) {
+ Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ return false;
+ }
+ return true;
+}
+
+bool SymbolizerProcess::StartSymbolizerSubprocess() {
+ if (!FileExists(path_)) {
+ if (!reported_invalid_path_) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ reported_invalid_path_ = true;
+ }
+ return false;
}
- bool StartSymbolizerSubprocess() {
- if (!FileExists(path_)) {
- if (!reported_invalid_path_) {
- Report("WARNING: invalid path to external symbolizer!\n");
- reported_invalid_path_ = true;
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
}
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
return false;
- }
-
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
internal_close(sock_pair[j][0]);
internal_close(sock_pair[j][1]);
}
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
+ break;
}
}
- CHECK(infd);
- CHECK(outfd);
+ }
+ CHECK(infd);
+ CHECK(outfd);
- // Real fork() may call user callbacks registered with pthread_atfork().
- int pid = internal_fork();
- if (pid == -1) {
- // Fork() failed.
- internal_close(infd[0]);
- internal_close(infd[1]);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- Report("WARNING: failed to fork external symbolizer "
- " (errno: %d)\n", errno);
- return false;
- } else if (pid == 0) {
- // Child subprocess.
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
- internal_close(fd);
- ExecuteWithDefaultArgs(path_);
- internal__exit(1);
- }
-
- // Continue execution in parent process.
- internal_close(outfd[0]);
+ // Real fork() may call user callbacks registered with pthread_atfork().
+ int pid = internal_fork();
+ if (pid == -1) {
+ // Fork() failed.
+ internal_close(infd[0]);
internal_close(infd[1]);
- input_fd_ = infd[0];
- output_fd_ = outfd[1];
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ Report("WARNING: failed to fork external symbolizer "
+ " (errno: %d)\n", errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+ internal_close(STDOUT_FILENO);
+ internal_close(STDIN_FILENO);
+ internal_dup2(outfd[0], STDIN_FILENO);
+ internal_dup2(infd[1], STDOUT_FILENO);
+ internal_close(outfd[0]);
+ internal_close(outfd[1]);
+ internal_close(infd[0]);
+ internal_close(infd[1]);
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
+ internal_close(fd);
+ ExecuteWithDefaultArgs(path_);
+ internal__exit(1);
+ }
- // Check that symbolizer subprocess started successfully.
- int pid_status;
- SleepForMillis(kSymbolizerStartupTimeMillis);
- int exited_pid = waitpid(pid, &pid_status, WNOHANG);
- if (exited_pid != 0) {
- // Either waitpid failed, or child has already exited.
- Report("WARNING: external symbolizer didn't start up correctly!\n");
- return false;
+ // Continue execution in parent process.
+ internal_close(outfd[0]);
+ internal_close(infd[1]);
+ input_fd_ = infd[0];
+ output_fd_ = outfd[1];
+
+ // Check that symbolizer subprocess started successfully.
+ int pid_status;
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ int exited_pid = waitpid(pid, &pid_status, WNOHANG);
+ if (exited_pid != 0) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+}
+
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format.
+static void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
+ bool top_frame = true;
+ SymbolizedStack *last = res;
+ while (true) {
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ break;
+ }
+ SymbolizedStack *cur;
+ if (top_frame) {
+ cur = res;
+ top_frame = false;
+ } else {
+ cur = SymbolizedStack::New(res->info.address);
+ cur->info.FillAddressAndModuleInfo(res->info.address, res->info.module,
+ res->info.module_offset);
+ last->next = cur;
+ last = cur;
}
- return true;
+ AddressInfo *info = &cur->info;
+ info->function = function_name;
+ // Parse <file>:<line>:<column> buffer.
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+ const char *line_info = ExtractToken(file_line_info, ":", &info->file);
+ line_info = ExtractInt(line_info, ":", &info->line);
+ line_info = ExtractInt(line_info, "", &info->column);
+ InternalFree(file_line_info);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
}
+}
- virtual bool RenderInputCommand(char *buffer, uptr max_length, bool is_data,
- const char *module_name,
- uptr module_offset) const {
- UNIMPLEMENTED();
- }
-
- virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
- UNIMPLEMENTED();
- }
-
- virtual void ExecuteWithDefaultArgs(const char *path_to_binary) const {
- UNIMPLEMENTED();
- }
-
- const char *path_;
- int input_fd_;
- int output_fd_;
-
- static const uptr kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
-
- static const uptr kMaxTimesRestarted = 5;
- static const int kSymbolizerStartupTimeMillis = 10;
- uptr times_restarted_;
- bool failed_to_start_;
- bool reported_invalid_path_;
-};
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+static void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+}
// For now we assume the following protocol:
// For each request of the form
@@ -514,75 +534,34 @@
internal_symbolizer_(internal_symbolizer),
libbacktrace_symbolizer_(libbacktrace_symbolizer) {}
- uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
+ SymbolizedStack *SymbolizePC(uptr addr) override {
BlockingMutexLock l(&mu_);
- if (max_frames == 0)
- return 0;
const char *module_name;
uptr module_offset;
if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
- return 0;
+ return SymbolizedStack::New(addr);
// First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
- uptr res = libbacktrace_symbolizer_->SymbolizeCode(
- addr, frames, max_frames, module_name, module_offset);
- if (res > 0)
+ if (SymbolizedStack *res = libbacktrace_symbolizer_->SymbolizeCode(
+ addr, module_name, module_offset))
return res;
}
+ // Always fill data about module name and offset.
+ SymbolizedStack *res = SymbolizedStack::New(addr);
+ res->info.FillAddressAndModuleInfo(addr, module_name, module_offset);
+
const char *str = SendCommand(false, module_name, module_offset);
if (str == 0) {
- // Symbolizer was not initialized or failed. Fill only data
- // about module name and offset.
- AddressInfo *info = &frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- return 1;
+ // Symbolizer was not initialized or failed.
+ return res;
}
- uptr frame_id = 0;
- for (frame_id = 0; frame_id < max_frames; frame_id++) {
- AddressInfo *info = &frames[frame_id];
- char *function_name = 0;
- str = ExtractToken(str, "\n", &function_name);
- CHECK(function_name);
- if (function_name[0] == '\0') {
- // There are no more frames.
- break;
- }
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- info->function = function_name;
- // Parse <file>:<line>:<column> buffer.
- char *file_line_info = 0;
- str = ExtractToken(str, "\n", &file_line_info);
- CHECK(file_line_info);
- const char *line_info = ExtractToken(file_line_info, ":", &info->file);
- line_info = ExtractInt(line_info, ":", &info->line);
- line_info = ExtractInt(line_info, "", &info->column);
- InternalFree(file_line_info);
- // Functions and filenames can be "??", in which case we write 0
- // to address info to mark that names are unknown.
- if (0 == internal_strcmp(info->function, "??")) {
- InternalFree(info->function);
- info->function = 0;
- }
- if (0 == internal_strcmp(info->file, "??")) {
- InternalFree(info->file);
- info->file = 0;
- }
- }
- if (frame_id == 0) {
- // Make sure we return at least one frame.
- AddressInfo *info = &frames[0];
- info->Clear();
- info->FillAddressAndModuleInfo(addr, module_name, module_offset);
- frame_id = 1;
- }
- return frame_id;
+ ParseSymbolizePCOutput(str, res);
+ return res;
}
- bool SymbolizeData(uptr addr, DataInfo *info) {
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
BlockingMutexLock l(&mu_);
LoadedModule *module = FindModuleForAddress(addr);
if (module == 0)
@@ -595,31 +574,29 @@
// First, try to use libbacktrace symbolizer (if it's available).
if (libbacktrace_symbolizer_ != 0) {
mu_.CheckLocked();
- if (libbacktrace_symbolizer_->SymbolizeData(info))
+ if (libbacktrace_symbolizer_->SymbolizeData(addr, info))
return true;
}
const char *str = SendCommand(true, module_name, module_offset);
if (str == 0)
return true;
- str = ExtractToken(str, "\n", &info->name);
- str = ExtractUptr(str, " ", &info->start);
- str = ExtractUptr(str, "\n", &info->size);
+ ParseSymbolizeDataOutput(str, info);
info->start += module->base_address();
return true;
}
bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
- uptr *module_address) {
+ uptr *module_address) override {
BlockingMutexLock l(&mu_);
return FindModuleNameAndOffsetForAddress(pc, module_name, module_address);
}
- bool CanReturnFileLineInfo() {
+ bool CanReturnFileLineInfo() override {
return internal_symbolizer_ != 0 || external_symbolizer_ != 0 ||
libbacktrace_symbolizer_ != 0;
}
- void Flush() {
+ void Flush() override {
BlockingMutexLock l(&mu_);
if (internal_symbolizer_ != 0) {
SymbolizerScope sym_scope(this);
@@ -627,7 +604,7 @@
}
}
- const char *Demangle(const char *name) {
+ const char *Demangle(const char *name) override {
BlockingMutexLock l(&mu_);
// Run hooks even if we don't use internal symbolizer, as cxxabi
// demangle may call system functions.
@@ -642,7 +619,7 @@
return DemangleCXXABI(name);
}
- void PrepareForSandboxing() {
+ void PrepareForSandboxing() override {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
BlockingMutexLock l(&mu_);
// Cache /proc/self/exe on Linux.
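To make the new parsing helpers above concrete, here is a sketch (it would have to live in the same translation unit, since ParseSymbolizePCOutput is file-static) that feeds a fabricated llvm-symbolizer-style reply into it; the address, names, and paths are made up.

static void ParseExampleReply() {
  const char *reply =
      "MyFunction\n"
      "/path/to/source.cc:42:7\n"
      "InlinedCaller\n"
      "/path/to/source.cc:100:0\n"
      "\n";  // an empty function name terminates the frame list
  SymbolizedStack *frames = SymbolizedStack::New(0x4005d0);
  ParseSymbolizePCOutput(reply, frames);
  // frames now holds two linked entries: MyFunction (line 42, column 7)
  // followed by InlinedCaller (line 100); "??" fields would have become null.
  frames->ClearAll();
}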
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_win.cc b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
index e8cf0a8..ed96a3a 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -26,30 +26,11 @@
public:
WinSymbolizer() : initialized_(false) {}
- uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) {
- if (max_frames == 0)
- return 0;
+ SymbolizedStack *SymbolizePC(uptr addr) override {
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
BlockingMutexLock l(&dbghelp_mu_);
- if (!initialized_) {
- if (!TrySymInitialize()) {
- // OK, maybe the client app has called SymInitialize already.
- // That's a bit unfortunate for us as all the DbgHelp functions are
- // single-threaded and we can't coordinate with the app.
- // FIXME: Can we stop the other threads at this point?
- // Anyways, we have to reconfigure stuff to make sure that SymInitialize
- // has all the appropriate options set.
- // Cross our fingers and reinitialize DbgHelp.
- Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
- Report("*** Most likely this means that the app is already ***\n");
- Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
- Report("*** Due to technical reasons, symbolization might crash ***\n");
- Report("*** or produce wrong results. ***\n");
- SymCleanup(GetCurrentProcess());
- TrySymInitialize();
- }
- initialized_ = true;
- }
+ InitializeIfNeeded();
// See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
@@ -60,36 +41,34 @@
BOOL got_objname = SymFromAddr(GetCurrentProcess(),
(DWORD64)addr, &offset, symbol);
if (!got_objname)
- return 0;
+ return frame;
DWORD unused;
IMAGEHLP_LINE64 line_info;
line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
&unused, &line_info);
- AddressInfo *info = &frames[0];
- info->Clear();
- info->function = internal_strdup(symbol->Name);
- info->function_offset = (uptr)offset;
+ frame->info.function = internal_strdup(symbol->Name);
+ frame->info.function_offset = (uptr)offset;
if (got_fileline) {
- info->file = internal_strdup(line_info.FileName);
- info->line = line_info.LineNumber;
+ frame->info.file = internal_strdup(line_info.FileName);
+ frame->info.line = line_info.LineNumber;
}
IMAGEHLP_MODULE64 mod_info;
internal_memset(&mod_info, 0, sizeof(mod_info));
mod_info.SizeOfStruct = sizeof(mod_info);
if (SymGetModuleInfo64(GetCurrentProcess(), addr, &mod_info))
- info->FillAddressAndModuleInfo(addr, mod_info.ImageName,
- addr - (uptr)mod_info.BaseOfImage);
- return 1;
+ frame->info.FillAddressAndModuleInfo(addr, mod_info.ImageName,
+ addr - (uptr)mod_info.BaseOfImage);
+ return frame;
}
- bool CanReturnFileLineInfo() {
+ bool CanReturnFileLineInfo() override {
return true;
}
- const char *Demangle(const char *name) {
+ const char *Demangle(const char *name) override {
CHECK(initialized_);
static char demangle_buffer[1000];
if (name[0] == '\01' &&
@@ -103,6 +82,58 @@
// FIXME: Implement GetModuleNameAndOffsetForPC().
private:
+ void InitializeIfNeeded() {
+ if (initialized_)
+ return;
+ if (!TrySymInitialize()) {
+ // OK, maybe the client app has called SymInitialize already.
+ // That's a bit unfortunate for us as all the DbgHelp functions are
+ // single-threaded and we can't coordinate with the app.
+ // FIXME: Can we stop the other threads at this point?
+ // Anyways, we have to reconfigure stuff to make sure that SymInitialize
+ // has all the appropriate options set.
+ // Cross our fingers and reinitialize DbgHelp.
+ Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
+ Report("*** Most likely this means that the app is already ***\n");
+ Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
+ Report("*** Due to technical reasons, symbolization might crash ***\n");
+ Report("*** or produce wrong results. ***\n");
+ SymCleanup(GetCurrentProcess());
+ TrySymInitialize();
+ }
+ initialized_ = true;
+
+ // When an executable is run from a location different from the one where it
+ // was originally built, we may not see the nearby PDB files.
+ // To work around this, let's append the directory of the main module
+ // to the symbol search path. None of the failures below are fatal.
+ const size_t kSymPathSize = 2048;
+ static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH];
+ if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) {
+ Report("*** WARNING: Failed to SymGetSearchPathW ***\n");
+ return;
+ }
+ size_t sz = wcslen(path_buffer);
+ if (sz) {
+ CHECK_EQ(0, wcscat_s(path_buffer, L";"));
+ sz++;
+ }
+ DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH);
+ if (res == 0 || res == MAX_PATH) {
+ Report("*** WARNING: Failed to getting the EXE directory ***\n");
+ return;
+ }
+ // Write the zero character in place of the last backslash to get the
+ // directory of the main module at the end of path_buffer.
+ wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\');
+ CHECK_NE(last_bslash, 0);
+ *last_bslash = L'\0';
+ if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) {
+ Report("*** WARNING: Failed to SymSetSearchPathW\n");
+ return;
+ }
+ }
+
bool TrySymInitialize() {
SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
return SymInitialize(GetCurrentProcess(), 0, TRUE);
diff --git a/lib/sanitizer_common/sanitizer_syscall_generic.inc b/lib/sanitizer_common/sanitizer_syscall_generic.inc
index 88d237f..15cf05f 100644
--- a/lib/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/lib/sanitizer_common/sanitizer_syscall_generic.inc
@@ -11,13 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_MAC
# define SYSCALL(name) SYS_ ## name
#else
# define SYSCALL(name) __NR_ ## name
#endif
-#if SANITIZER_FREEBSD && defined(__x86_64__)
+#if (SANITIZER_FREEBSD || SANITIZER_MAC) && defined(__x86_64__)
# define internal_syscall __syscall
# else
# define internal_syscall syscall
diff --git a/lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc
index a98e617..7ab2efb 100644
--- a/lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc
@@ -96,7 +96,7 @@
struct UnwindTraceArg {
BufferedStackTrace *stack;
- uptr max_depth;
+ u32 max_depth;
};
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
@@ -108,7 +108,7 @@
return UNWIND_CONTINUE;
}
-void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
CHECK_GE(max_depth, 2);
size = 0;
UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
@@ -128,7 +128,7 @@
}
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
- uptr max_depth) {
+ u32 max_depth) {
CHECK_GE(max_depth, 2);
if (!unwind_backtrace_signal_arch) {
SlowUnwindStack(pc, max_depth);
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 2f9b158..335ceca 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -20,6 +20,7 @@
#include <windows.h>
#include <dbghelp.h>
#include <io.h>
+#include <psapi.h>
#include <stdlib.h>
#include "sanitizer_common.h"
@@ -122,18 +123,34 @@
}
void *Mprotect(uptr fixed_addr, uptr size) {
- return VirtualAlloc((LPVOID)fixed_addr, size,
- MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+ void *res = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return res;
}
void FlushUnneededShadowMemory(uptr addr, uptr size) {
// This is almost useless on 32-bits.
- // FIXME: add madvice-analog when we move to 64-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
+}
+
+void NoHugePagesInRegion(uptr addr, uptr size) {
+ // FIXME: probably similar to FlushUnneededShadowMemory.
+}
+
+void DontDumpShadowMemory(uptr addr, uptr length) {
+ // This is almost useless on 32-bits.
+ // FIXME: add madvise-analog when we move to 64-bits.
}
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
- // FIXME: shall we do anything here on Windows?
- return true;
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
+ return mbi.Protect == PAGE_NOACCESS &&
+ (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
@@ -187,9 +204,80 @@
UNIMPLEMENTED();
}
-void DumpProcessMap() {
- UNIMPLEMENTED();
+namespace {
+struct ModuleInfo {
+ HMODULE handle;
+ uptr base_address;
+ uptr end_address;
+};
+
+int CompareModulesBase(const void *pl, const void *pr) {
+ const ModuleInfo &l = *(ModuleInfo *)pl, &r = *(ModuleInfo *)pr;
+ if (l.base_address < r.base_address)
+ return -1;
+ return l.base_address > r.base_address;
}
+} // namespace
+
+#ifndef SANITIZER_GO
+void DumpProcessMap() {
+ Report("Dumping process modules:\n");
+ HANDLE cur_process = GetCurrentProcess();
+
+ // Query the list of modules. Start by assuming there are no more than 256
+ // modules and retry if that's not sufficient.
+ ModuleInfo *modules;
+ size_t num_modules;
+ {
+ HMODULE *hmodules = 0;
+ uptr modules_buffer_size = sizeof(HMODULE) * 256;
+ DWORD bytes_required;
+ while (!hmodules) {
+ hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
+ CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
+ &bytes_required));
+ if (bytes_required > modules_buffer_size) {
+ // Either there turned out to be more than 256 hmodules, or new hmodules
+ // could have been loaded since the last try. Retry.
+ UnmapOrDie(hmodules, modules_buffer_size);
+ hmodules = 0;
+ modules_buffer_size = bytes_required;
+ }
+ }
+
+ num_modules = bytes_required / sizeof(HMODULE);
+ modules =
+ (ModuleInfo *)MmapOrDie(num_modules * sizeof(ModuleInfo), __FUNCTION__);
+ for (size_t i = 0; i < num_modules; ++i) {
+ modules[i].handle = hmodules[i];
+ MODULEINFO mi;
+ if (!GetModuleInformation(cur_process, hmodules[i], &mi, sizeof(mi)))
+ continue;
+ modules[i].base_address = (uptr)mi.lpBaseOfDll;
+ modules[i].end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
+ }
+ UnmapOrDie(hmodules, modules_buffer_size);
+ }
+
+ qsort(modules, num_modules, sizeof(ModuleInfo), CompareModulesBase);
+
+ for (size_t i = 0; i < num_modules; ++i) {
+ const ModuleInfo &mi = modules[i];
+ char module_name[MAX_PATH];
+ bool got_module_name = GetModuleFileNameA(
+ mi.handle, module_name, sizeof(module_name));
+ if (mi.end_address != 0) {
+ Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
+ got_module_name ? module_name : "[no name]");
+ } else if (got_module_name) {
+ Printf("\t??\?-??? %s\n", module_name);
+ } else {
+ Printf("\t???\n");
+ }
+ }
+ UnmapOrDie(modules, num_modules * sizeof(ModuleInfo));
+}
+#endif
void DisableCoreDumperIfNecessary() {
// Do nothing.
@@ -238,8 +326,9 @@
}
void Abort() {
- abort();
- internal__exit(-1); // abort is not NORETURN on Windows.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ internal__exit(3);
}
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
@@ -375,6 +464,13 @@
UNIMPLEMENTED();
}
+uptr GetRSS() {
+ return 0;
+}
+
+void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) { }
+
// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;
@@ -444,7 +540,7 @@
}
#if !SANITIZER_GO
-void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
@@ -460,7 +556,7 @@
}
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
- uptr max_depth) {
+ u32 max_depth) {
CONTEXT ctx = *(CONTEXT *)context;
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
@@ -488,15 +584,10 @@
}
#endif // #if !SANITIZER_GO
-void MaybeOpenReportFile() {
- // Windows doesn't have native fork, and we don't support Cygwin or other
- // environments that try to fake it, so the initial report_fd will always be
- // correct.
-}
-
-void RawWrite(const char *buffer) {
- uptr length = (uptr)internal_strlen(buffer);
- if (length != internal_write(report_fd, buffer, length)) {
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ if (length != internal_write(fd, buffer, length)) {
// stderr may be closed, but we may be able to print to the debugger
// instead. This is the case when launching a program from Visual Studio,
// and the following routine should write to its console.
diff --git a/lib/sanitizer_common/scripts/check_lint.sh b/lib/sanitizer_common/scripts/check_lint.sh
index 267273d..7ed05d7 100755
--- a/lib/sanitizer_common/scripts/check_lint.sh
+++ b/lib/sanitizer_common/scripts/check_lint.sh
@@ -32,7 +32,14 @@
DFSAN_RTL_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int,-runtime/printf,-runtime/references,-readability/function
COMMON_RTL_INC_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int,-runtime/sizeof,-runtime/printf,-readability/fn_size
SANITIZER_INCLUDES_LINT_FILTER=${COMMON_LINT_FILTER},-runtime/int
-MKTEMP="mktemp -q /tmp/tmp.XXXXXXXXXX"
+
+MKTEMP_DIR=$(mktemp -qd /tmp/check_lint.XXXXXXXXXX)
+MKTEMP="mktemp -q ${MKTEMP_DIR}/tmp.XXXXXXXXXX"
+cleanup() {
+ rm -rf $MKTEMP_DIR
+}
+trap cleanup EXIT
+
cd ${LLVM_CHECKOUT}
EXITSTATUS=0
diff --git a/lib/sanitizer_common/scripts/sancov.py b/lib/sanitizer_common/scripts/sancov.py
index 4769530..566116e 100755
--- a/lib/sanitizer_common/scripts/sancov.py
+++ b/lib/sanitizer_common/scripts/sancov.py
@@ -118,7 +118,7 @@
if len(pc_list) == 0: continue
assert path.endswith('.sancov.raw')
dst_path = module_path + '.' + os.path.basename(path)[:-4]
- print "writing %d PCs to %s" % (len(pc_list), dst_path)
+ print >> sys.stderr, "%s: writing %d PCs to %s" % (prog_name, len(pc_list), dst_path)
arr = array.array('I')
arr.fromlist(sorted(pc_list))
with open(dst_path, 'ab') as f2:
diff --git a/lib/sanitizer_common/tests/CMakeLists.txt b/lib/sanitizer_common/tests/CMakeLists.txt
index bb7a399..75008db 100644
--- a/lib/sanitizer_common/tests/CMakeLists.txt
+++ b/lib/sanitizer_common/tests/CMakeLists.txt
@@ -2,6 +2,9 @@
clang_compiler_add_cxx_check()
+# FIXME: use SANITIZER_COMMON_SUPPORTED_ARCH here
+filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el)
+
set(SANITIZER_UNITTESTS
sanitizer_allocator_test.cc
sanitizer_atomic_test.cc
@@ -157,24 +160,18 @@
$<TARGET_OBJECTS:RTSanitizerCommon.osx>)
else()
if(CAN_TARGET_x86_64)
- add_sanitizer_common_lib("RTSanitizerCommon.test.x86_64"
- $<TARGET_OBJECTS:RTSanitizerCommon.x86_64>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.x86_64>)
add_sanitizer_common_lib("RTSanitizerCommon.test.nolibc.x86_64"
$<TARGET_OBJECTS:RTSanitizerCommon.x86_64>)
endif()
- if(CAN_TARGET_i386)
- add_sanitizer_common_lib("RTSanitizerCommon.test.i386"
- $<TARGET_OBJECTS:RTSanitizerCommon.i386>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.i386>)
- endif()
+ foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH})
+ add_sanitizer_common_lib("RTSanitizerCommon.test.${arch}"
+ $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>)
+ endforeach()
endif()
- if(CAN_TARGET_x86_64)
- add_sanitizer_tests_for_arch(x86_64)
- endif()
- if(CAN_TARGET_i386)
- add_sanitizer_tests_for_arch(i386)
- endif()
+ foreach(arch ${SANITIZER_UNITTEST_SUPPORTED_ARCH})
+ add_sanitizer_tests_for_arch(${arch})
+ endforeach()
endif()
if(ANDROID)
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index f61d58d..be8fc91 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -14,7 +14,6 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"
@@ -27,9 +26,9 @@
#include <set>
// Too slow for debug build
-#if TSAN_DEBUG == 0
+#if !SANITIZER_DEBUG
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
@@ -39,6 +38,8 @@
typedef SizeClassAllocator64<
kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#elif defined(__mips64)
+static const u64 kAddressSpaceSize = 1ULL << 40;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif
@@ -140,7 +141,7 @@
delete a;
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
TestSizeClassAllocator<Allocator64>();
}
@@ -184,7 +185,7 @@
delete a;
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64>();
}
@@ -192,7 +193,7 @@
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
-#endif // SANITIZER_WORDSIZE == 64
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
}
@@ -221,7 +222,7 @@
delete a;
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
@@ -231,7 +232,7 @@
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
-#endif // SANITIZER_WORDSIZE == 64
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
struct TestMapUnmapCallback {
static int map_count, unmap_count;
@@ -241,7 +242,7 @@
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
@@ -297,7 +298,7 @@
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
LargeMmapAllocator<TestMapUnmapCallback> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -322,7 +323,7 @@
a.TestOnlyUnmap();
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
@@ -331,7 +332,7 @@
#if !defined(_WIN32) // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
@@ -413,25 +414,22 @@
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
Allocator *a = new Allocator;
- a->Init();
+ a->Init(/* may_return_null */ true);
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
a->InitCache(&cache);
- bool allocator_may_return_null = common_flags()->allocator_may_return_null;
- common_flags()->allocator_may_return_null = true;
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
- common_flags()->allocator_may_return_null = false;
+ // Set to false
+ a->SetMayReturnNull(false);
EXPECT_DEATH(a->Allocate(&cache, -1, 1),
"allocator is terminating the process");
- // Restore the original value.
- common_flags()->allocator_may_return_null = allocator_may_return_null;
const uptr kNumAllocs = 100000;
const uptr kNumIter = 10;
@@ -465,7 +463,7 @@
a->TestOnlyUnmap();
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
TestCombinedAllocator<Allocator64,
LargeMmapAllocator<>,
@@ -521,7 +519,7 @@
delete a;
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64> >();
@@ -538,7 +536,7 @@
SizeClassAllocatorLocalCache<Allocator32Compact> >();
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;
@@ -694,7 +692,7 @@
delete a;
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
TestSizeClassAllocatorIteration<Allocator64>();
}
@@ -706,7 +704,7 @@
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
@@ -733,7 +731,7 @@
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
@@ -769,7 +767,7 @@
}
-#if SANITIZER_WORDSIZE == 64
+#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// In a world where regions are small and chunks are huge...
@@ -859,4 +857,4 @@
EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}
-#endif // #if TSAN_DEBUG==0
+#endif // #if !SANITIZER_DEBUG
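
The allocator test changes above reflect an API shift: whether an allocator may return null for oversized requests is now configured per-allocator through Init() and SetMayReturnNull() instead of being read from common_flags()->allocator_may_return_null. A minimal sketch of the new pattern (illustrative only, assuming the Allocator and AllocatorCache typedefs defined earlier in this test file):

  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);         // oversized requests yield null
  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);
  void *p = a->Allocate(&cache, (uptr)-1, 1);  // p == nullptr, no abort
  a->SetMayReturnNull(false);
  // The same Allocate() call would now terminate the process with
  // "allocator is terminating the process".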
diff --git a/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc b/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
index 8c83633..7835eef 100644
--- a/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
@@ -268,7 +268,7 @@
}
EXPECT_EQ(d.testOnlyGetEpoch(), 4 * d.size());
-#if TSAN_DEBUG == 0
+#if !SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
diff --git a/lib/sanitizer_common/tests/sanitizer_flags_test.cc b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
index 1055f5d..3e5d838 100644
--- a/lib/sanitizer_common/tests/sanitizer_flags_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_flags_test.cc
@@ -12,7 +12,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "gtest/gtest.h"
#include <string.h>
@@ -20,58 +22,79 @@
namespace __sanitizer {
static const char kFlagName[] = "flag_name";
+static const char kFlagDesc[] = "flag description";
template <typename T>
static void TestFlag(T start_value, const char *env, T final_value) {
T flag = start_value;
- ParseFlag(env, &flag, kFlagName, "flag description");
+
+ FlagParser parser;
+ RegisterFlag(&parser, kFlagName, kFlagDesc, &flag);
+
+ parser.ParseString(env);
+
EXPECT_EQ(final_value, flag);
}
-static void TestStrFlag(const char *start_value, const char *env,
- const char *final_value) {
+template <>
+void TestFlag(const char *start_value, const char *env,
+ const char *final_value) {
const char *flag = start_value;
- ParseFlag(env, &flag, kFlagName, "flag description");
+
+ FlagParser parser;
+ RegisterFlag(&parser, kFlagName, kFlagDesc, &flag);
+
+ parser.ParseString(env);
+
EXPECT_EQ(0, internal_strcmp(final_value, flag));
}
TEST(SanitizerCommon, BooleanFlags) {
- TestFlag(true, "--flag_name", true);
- TestFlag(false, "flag_name", false);
- TestFlag(false, "--flag_name=1", true);
- TestFlag(true, "asdas flag_name=0 asdas", false);
- TestFlag(true, " --flag_name=0 ", false);
+ TestFlag(false, "flag_name=1", true);
TestFlag(false, "flag_name=yes", true);
TestFlag(false, "flag_name=true", true);
+ TestFlag(true, "flag_name=0", false);
TestFlag(true, "flag_name=no", false);
TestFlag(true, "flag_name=false", false);
}
TEST(SanitizerCommon, IntFlags) {
TestFlag(-11, 0, -11);
- TestFlag(-11, "flag_name", -11);
- TestFlag(-11, "--flag_name=", 0);
- TestFlag(-11, "--flag_name=42", 42);
- TestFlag(-11, "--flag_name=-42", -42);
+ TestFlag(-11, "flag_name=0", 0);
+ TestFlag(-11, "flag_name=42", 42);
+ TestFlag(-11, "flag_name=-42", -42);
+
+ // Unrecognized flags are ignored.
+ TestFlag(-11, "--flag_name=42", -11);
+ TestFlag(-11, "zzzzzzz=42", -11);
+
+ EXPECT_DEATH(TestFlag(-11, "flag_name", 0), "expected '='");
+ EXPECT_DEATH(TestFlag(-11, "flag_name=42U", 0),
+ "Invalid value for int option");
}
TEST(SanitizerCommon, StrFlags) {
- TestStrFlag("zzz", 0, "zzz");
- TestStrFlag("zzz", "flag_name", "zzz");
- TestStrFlag("zzz", "--flag_name=", "");
- TestStrFlag("", "--flag_name=abc", "abc");
- TestStrFlag("", "--flag_name='abc zxc'", "abc zxc");
- TestStrFlag("", "--flag_name='abc zxcc'", "abc zxcc");
- TestStrFlag("", "--flag_name=\"abc qwe\" asd", "abc qwe");
- TestStrFlag("", "other_flag_name=zzz", "");
+ TestFlag("zzz", 0, "zzz");
+ TestFlag("zzz", "flag_name=", "");
+ TestFlag("zzz", "flag_name=abc", "abc");
+ TestFlag("", "flag_name=abc", "abc");
+ TestFlag("", "flag_name='abc zxc'", "abc zxc");
+ // TestStrFlag("", "flag_name=\"abc qwe\" asd", "abc qwe");
}
static void TestTwoFlags(const char *env, bool expected_flag1,
- const char *expected_flag2) {
+ const char *expected_flag2,
+ const char *name1 = "flag1",
+ const char *name2 = "flag2") {
bool flag1 = !expected_flag1;
const char *flag2 = "";
- ParseFlag(env, &flag1, "flag1", "flag1 description");
- ParseFlag(env, &flag2, "flag2", "flag2 description");
+
+ FlagParser parser;
+ RegisterFlag(&parser, name1, kFlagDesc, &flag1);
+ RegisterFlag(&parser, name2, kFlagDesc, &flag2);
+
+ parser.ParseString(env);
+
EXPECT_EQ(expected_flag1, flag1);
EXPECT_EQ(0, internal_strcmp(flag2, expected_flag2));
}
@@ -81,6 +104,39 @@
TestTwoFlags("flag2='qxx' flag1=0", false, "qxx");
TestTwoFlags("flag1=false:flag2='zzz'", false, "zzz");
TestTwoFlags("flag2=qxx:flag1=yes", true, "qxx");
+ TestTwoFlags("flag2=qxx\nflag1=yes", true, "qxx");
+ TestTwoFlags("flag2=qxx\r\nflag1=yes", true, "qxx");
+ TestTwoFlags("flag2=qxx\tflag1=yes", true, "qxx");
+}
+
+TEST(SanitizerCommon, CommonSuffixFlags) {
+ TestTwoFlags("flag=1 other_flag='zzz'", true, "zzz", "flag", "other_flag");
+ TestTwoFlags("other_flag='zzz' flag=1", true, "zzz", "flag", "other_flag");
+ TestTwoFlags("other_flag=' flag=0 ' flag=1", true, " flag=0 ", "flag",
+ "other_flag");
+ TestTwoFlags("flag=1 other_flag=' flag=0 '", true, " flag=0 ", "flag",
+ "other_flag");
+}
+
+TEST(SanitizerCommon, CommonFlags) {
+ CommonFlags cf;
+ FlagParser parser;
+ RegisterCommonFlags(&parser, &cf);
+
+ cf.SetDefaults();
+ EXPECT_TRUE(cf.symbolize);
+ EXPECT_STREQ(".", cf.coverage_dir);
+
+ cf.symbolize = false;
+ cf.coverage = true;
+ cf.coverage_direct = true;
+ cf.log_path = "path/one";
+
+ parser.ParseString("symbolize=1:coverage_direct=false log_path='path/two'");
+ EXPECT_TRUE(cf.symbolize);
+ EXPECT_TRUE(cf.coverage);
+ EXPECT_FALSE(cf.coverage_direct);
+ EXPECT_STREQ("path/two", cf.log_path);
}
} // namespace __sanitizer
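
The rewritten flag tests above exercise the new FlagParser interface, which replaces the old per-flag ParseFlag() calls. A minimal sketch of that flow, using only the RegisterFlag()/ParseString() calls shown in the hunks above (the flag names here are illustrative):

  #include "sanitizer_common/sanitizer_flags.h"
  #include "sanitizer_common/sanitizer_flag_parser.h"

  using namespace __sanitizer;

  static bool my_bool_flag = false;       // caller-provided defaults
  static const char *my_str_flag = "";

  static void ParseMyOptions(const char *env) {
    FlagParser parser;
    RegisterFlag(&parser, "my_bool_flag", "example description", &my_bool_flag);
    RegisterFlag(&parser, "my_str_flag", "example description", &my_str_flag);
    // Accepts "name=value" pairs separated by spaces, colons, tabs or
    // newlines, e.g. "my_bool_flag=1:my_str_flag='some value'".
    // Unrecognized names are ignored; "name" without '=' is an error.
    parser.ParseString(env);
  }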
diff --git a/lib/sanitizer_common/tests/sanitizer_libc_test.cc b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
index 660710d..8712d2c 100644
--- a/lib/sanitizer_common/tests/sanitizer_libc_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_libc_test.cc
@@ -55,6 +55,19 @@
unsigned char z;
};
+static void temp_file_name(char *buf, size_t bufsize, const char *prefix) {
+ const char *tmpdir = "/tmp";
+#if SANITIZER_ANDROID
+ // I don't know a way to query temp directory location on Android without
+ // going through Java interfaces. The code below is not ideal, but should
+ // work. May require "adb root", but it is needed for almost any use of ASan
+ // on Android already.
+ tmpdir = GetEnv("EXTERNAL_STORAGE");
+#endif
+ u32 uid = GetUid();
+ internal_snprintf(buf, bufsize, "%s/%s%d", tmpdir, prefix, uid);
+}
+
// FIXME: File manipulations are not yet supported on Windows
#if !defined(_WIN32)
TEST(SanitizerCommon, FileOps) {
@@ -63,28 +76,16 @@
const char *str2 = "zxcv";
uptr len2 = internal_strlen(str2);
- u32 uid = GetUid();
- char temp_filename[128];
-#if SANITIZER_ANDROID
- // I don't know a way to query temp directory location on Android without
- // going through Java interfaces. The code below is not ideal, but should
- // work. May require "adb root", but it is needed for almost any use of ASan
- // on Android already.
- internal_snprintf(temp_filename, sizeof(temp_filename),
- "%s/sanitizer_common.tmp.%d",
- GetEnv("EXTERNAL_STORAGE"), uid);
-#else
- internal_snprintf(temp_filename, sizeof(temp_filename),
- "/tmp/sanitizer_common.tmp.%d", uid);
-#endif
- uptr openrv = OpenFile(temp_filename, true);
+ char tmpfile[128];
+ temp_file_name(tmpfile, sizeof(tmpfile), "sanitizer_common.fileops.tmp.");
+ uptr openrv = OpenFile(tmpfile, true);
EXPECT_FALSE(internal_iserror(openrv));
fd_t fd = openrv;
EXPECT_EQ(len1, internal_write(fd, str1, len1));
EXPECT_EQ(len2, internal_write(fd, str2, len2));
internal_close(fd);
- openrv = OpenFile(temp_filename, false);
+ openrv = OpenFile(tmpfile, false);
EXPECT_FALSE(internal_iserror(openrv));
fd = openrv;
uptr fsize = internal_filesize(fd);
@@ -92,8 +93,8 @@
#if SANITIZER_TEST_HAS_STAT_H
struct stat st1, st2, st3;
- EXPECT_EQ(0u, internal_stat(temp_filename, &st1));
- EXPECT_EQ(0u, internal_lstat(temp_filename, &st2));
+ EXPECT_EQ(0u, internal_stat(tmpfile, &st1));
+ EXPECT_EQ(0u, internal_lstat(tmpfile, &st2));
EXPECT_EQ(0u, internal_fstat(fd, &st3));
EXPECT_EQ(fsize, (uptr)st3.st_size);
@@ -115,6 +116,7 @@
EXPECT_EQ(len2, internal_read(fd, buf, len2));
EXPECT_EQ(0, internal_memcmp(buf, str2, len2));
internal_close(fd);
+ internal_unlink(tmpfile);
}
#endif
@@ -125,3 +127,35 @@
EXPECT_EQ(0, internal_strchr(haystack, 'z'));
EXPECT_EQ(haystack + 8, internal_strchrnul(haystack, 'z'));
}
+
+// FIXME: File manipulations are not yet supported on Windows
+#if !defined(_WIN32) && !SANITIZER_MAC
+TEST(SanitizerCommon, InternalMmapWithOffset) {
+ char tmpfile[128];
+ temp_file_name(tmpfile, sizeof(tmpfile),
+ "sanitizer_common.internalmmapwithoffset.tmp.");
+ uptr res = OpenFile(tmpfile, true);
+ ASSERT_FALSE(internal_iserror(res));
+ fd_t fd = res;
+
+ uptr page_size = GetPageSizeCached();
+ res = internal_ftruncate(fd, page_size * 2);
+ ASSERT_FALSE(internal_iserror(res));
+
+ res = internal_lseek(fd, page_size, SEEK_SET);
+ ASSERT_FALSE(internal_iserror(res));
+
+ res = internal_write(fd, "AB", 2);
+ ASSERT_FALSE(internal_iserror(res));
+
+ char *p = (char *)MapWritableFileToMemory(nullptr, page_size, fd, page_size);
+ ASSERT_NE(nullptr, p);
+
+ ASSERT_EQ('A', p[0]);
+ ASSERT_EQ('B', p[1]);
+
+ internal_close(fd);
+ internal_munmap(p, page_size);
+ internal_unlink(tmpfile);
+}
+#endif
diff --git a/lib/sanitizer_common/tests/sanitizer_linux_test.cc b/lib/sanitizer_common/tests/sanitizer_linux_test.cc
index 592d9c3..11342b7 100644
--- a/lib/sanitizer_common/tests/sanitizer_linux_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_linux_test.cc
@@ -255,6 +255,14 @@
}
}
+#if defined(__mips64)
+// Effectively, this is a test for ThreadDescriptorSize() which is used to
+// compute ThreadSelf().
+TEST(SanitizerLinux, ThreadSelfTest) {
+ ASSERT_EQ(pthread_self(), ThreadSelf());
+}
+#endif
+
} // namespace __sanitizer
#endif // SANITIZER_LINUX
diff --git a/lib/sanitizer_common/tests/sanitizer_printf_test.cc b/lib/sanitizer_common/tests/sanitizer_printf_test.cc
index d0b46ac..5e39e0a 100644
--- a/lib/sanitizer_common/tests/sanitizer_printf_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_printf_test.cc
@@ -28,14 +28,11 @@
(unsigned)10, (unsigned long)11, // NOLINT
(void*)0x123, "_string_");
EXPECT_EQ(len, strlen(buf));
- void *ptr;
- if (sizeof(ptr) == 4) {
- EXPECT_STREQ("a-1b-2c4294967292e5fahbq"
- "0x00000123e_string_r", buf);
- } else {
- EXPECT_STREQ("a-1b-2c4294967292e5fahbq"
- "0x000000000123e_string_r", buf);
- }
+
+ std::string expectedString = "a-1b-2c4294967292e5fahbq0x";
+ expectedString += std::string(SANITIZER_POINTER_FORMAT_LENGTH - 3, '0');
+ expectedString += "123e_string_r";
+ EXPECT_STREQ(expectedString.c_str(), buf);
}
TEST(Printf, OverflowStr) {
diff --git a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
index 495b726..abe4ef4 100644
--- a/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_procmaps_test.cc
@@ -48,6 +48,7 @@
if (strstr(modules[i].full_name(), binary_name) != 0)
found = true;
}
+ modules[i].clear();
}
EXPECT_TRUE(found);
free(modules);
diff --git a/lib/sanitizer_common/tests/sanitizer_suppressions_test.cc b/lib/sanitizer_common/tests/sanitizer_suppressions_test.cc
index 2a1e356..e8c30d0 100644
--- a/lib/sanitizer_common/tests/sanitizer_suppressions_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_suppressions_test.cc
@@ -58,99 +58,77 @@
EXPECT_FALSE(MyMatch("foo$^bar", "foobar"));
}
-TEST(Suppressions, TypeStrings) {
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionNone), "none"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionRace), "race"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionMutex), "mutex"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionThread), "thread"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionSignal), "signal"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionLeak), "leak"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionLib),
- "called_from_lib"));
- CHECK(
- !internal_strcmp(SuppressionTypeString(SuppressionDeadlock), "deadlock"));
- CHECK(!internal_strcmp(SuppressionTypeString(SuppressionVptrCheck),
- "vptr_check"));
- // Ensure this test is up-to-date when suppression types are added.
- CHECK_EQ(9, SuppressionTypeCount);
-}
+static const char *kTestSuppressionTypes[] = {"race", "thread", "mutex",
+ "signal"};
class SuppressionContextTest : public ::testing::Test {
public:
- virtual void SetUp() { ctx_ = new(placeholder_) SuppressionContext; }
- virtual void TearDown() { ctx_->~SuppressionContext(); }
+ SuppressionContextTest()
+ : ctx_(kTestSuppressionTypes, ARRAY_SIZE(kTestSuppressionTypes)) {}
protected:
- InternalMmapVector<Suppression> *Suppressions() {
- return &ctx_->suppressions_;
+ SuppressionContext ctx_;
+
+ void CheckSuppressions(unsigned count, std::vector<const char *> types,
+ std::vector<const char *> templs) const {
+ EXPECT_EQ(count, ctx_.SuppressionCount());
+ for (unsigned i = 0; i < count; i++) {
+ const Suppression *s = ctx_.SuppressionAt(i);
+ EXPECT_STREQ(types[i], s->type);
+ EXPECT_STREQ(templs[i], s->templ);
+ }
}
- SuppressionContext *ctx_;
- ALIGNED(64) char placeholder_[sizeof(SuppressionContext)];
};
TEST_F(SuppressionContextTest, Parse) {
- ctx_->Parse(
- "race:foo\n"
- " race:bar\n" // NOLINT
- "race:baz \n" // NOLINT
- "# a comment\n"
- "race:quz\n"
- ); // NOLINT
- EXPECT_EQ((unsigned)4, ctx_->SuppressionCount());
- EXPECT_EQ((*Suppressions())[3].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[3].templ, "quz"));
- EXPECT_EQ((*Suppressions())[2].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[2].templ, "baz"));
- EXPECT_EQ((*Suppressions())[1].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[1].templ, "bar"));
- EXPECT_EQ((*Suppressions())[0].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[0].templ, "foo"));
+ ctx_.Parse("race:foo\n"
+ " race:bar\n" // NOLINT
+ "race:baz \n" // NOLINT
+ "# a comment\n"
+ "race:quz\n"); // NOLINT
+ CheckSuppressions(4, {"race", "race", "race", "race"},
+ {"foo", "bar", "baz", "quz"});
}
TEST_F(SuppressionContextTest, Parse2) {
- ctx_->Parse(
+ ctx_.Parse(
" # first line comment\n" // NOLINT
" race:bar \n" // NOLINT
"race:baz* *baz\n"
"# a comment\n"
"# last line comment\n"
); // NOLINT
- EXPECT_EQ((unsigned)2, ctx_->SuppressionCount());
- EXPECT_EQ((*Suppressions())[1].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[1].templ, "baz* *baz"));
- EXPECT_EQ((*Suppressions())[0].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[0].templ, "bar"));
+ CheckSuppressions(2, {"race", "race"}, {"bar", "baz* *baz"});
}
TEST_F(SuppressionContextTest, Parse3) {
- ctx_->Parse(
+ ctx_.Parse(
"# last suppression w/o line-feed\n"
"race:foo\n"
"race:bar"
); // NOLINT
- EXPECT_EQ((unsigned)2, ctx_->SuppressionCount());
- EXPECT_EQ((*Suppressions())[1].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[1].templ, "bar"));
- EXPECT_EQ((*Suppressions())[0].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[0].templ, "foo"));
+ CheckSuppressions(2, {"race", "race"}, {"foo", "bar"});
}
TEST_F(SuppressionContextTest, ParseType) {
- ctx_->Parse(
+ ctx_.Parse(
"race:foo\n"
"thread:bar\n"
"mutex:baz\n"
"signal:quz\n"
); // NOLINT
- EXPECT_EQ((unsigned)4, ctx_->SuppressionCount());
- EXPECT_EQ((*Suppressions())[3].type, SuppressionSignal);
- EXPECT_EQ(0, strcmp((*Suppressions())[3].templ, "quz"));
- EXPECT_EQ((*Suppressions())[2].type, SuppressionMutex);
- EXPECT_EQ(0, strcmp((*Suppressions())[2].templ, "baz"));
- EXPECT_EQ((*Suppressions())[1].type, SuppressionThread);
- EXPECT_EQ(0, strcmp((*Suppressions())[1].templ, "bar"));
- EXPECT_EQ((*Suppressions())[0].type, SuppressionRace);
- EXPECT_EQ(0, strcmp((*Suppressions())[0].templ, "foo"));
+ CheckSuppressions(4, {"race", "thread", "mutex", "signal"},
+ {"foo", "bar", "baz", "quz"});
+}
+
+TEST_F(SuppressionContextTest, HasSuppressionType) {
+ ctx_.Parse(
+ "race:foo\n"
+ "thread:bar\n");
+ EXPECT_TRUE(ctx_.HasSuppressionType("race"));
+ EXPECT_TRUE(ctx_.HasSuppressionType("thread"));
+ EXPECT_FALSE(ctx_.HasSuppressionType("mutex"));
+ EXPECT_FALSE(ctx_.HasSuppressionType("signal"));
}
} // namespace __sanitizer
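
The suppression tests above show the reworked SuppressionContext: the set of recognized suppression types is now passed to the constructor, and parsed entries are inspected through SuppressionCount()/SuppressionAt()/HasSuppressionType() instead of the old enum-based accessors. A short sketch of the new usage, with a hypothetical type list:

  static const char *kMyTypes[] = {"race", "thread"};
  SuppressionContext ctx(kMyTypes, ARRAY_SIZE(kMyTypes));
  ctx.Parse("race:MyRacyFunction\n"
            "thread:MyThreadFunc\n");
  if (ctx.HasSuppressionType("race")) {
    for (unsigned i = 0; i < ctx.SuppressionCount(); i++) {
      const Suppression *s = ctx.SuppressionAt(i);
      // s->type is the type string (e.g. "race"),
      // s->templ is the match template (e.g. "MyRacyFunction").
    }
  }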
diff --git a/lib/sanitizer_common/tests/sanitizer_test_utils.h b/lib/sanitizer_common/tests/sanitizer_test_utils.h
index 64db37f..9c162a6 100644
--- a/lib/sanitizer_common/tests/sanitizer_test_utils.h
+++ b/lib/sanitizer_common/tests/sanitizer_test_utils.h
@@ -118,4 +118,10 @@
# define SANITIZER_TEST_HAS_STRNLEN 0
#endif
+#if defined(__FreeBSD__)
+# define SANITIZER_TEST_HAS_PRINTF_L 1
+#else
+# define SANITIZER_TEST_HAS_PRINTF_L 0
+#endif
+
#endif // SANITIZER_TEST_UTILS_H
diff --git a/lib/tsan/CMakeLists.txt b/lib/tsan/CMakeLists.txt
index 6d51faa..68862f4 100644
--- a/lib/tsan/CMakeLists.txt
+++ b/lib/tsan/CMakeLists.txt
@@ -9,16 +9,13 @@
append_no_rtti_flag(TSAN_CFLAGS)
set(TSAN_RTL_CFLAGS ${TSAN_CFLAGS})
-append_list_if(COMPILER_RT_HAS_WFRAME_LARGER_THAN_FLAG -Wframe-larger-than=512 TSAN_RTL_CFLAGS)
-append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors TSAN_RTL_CFLAGS)
+append_list_if(COMPILER_RT_HAS_MSSE3_FLAG -msse3 TSAN_RTL_CFLAGS)
+append_list_if(SANITIZER_LIMIT_FRAME_SIZE -Wframe-larger-than=512
+ TSAN_RTL_CFLAGS)
+append_list_if(COMPILER_RT_HAS_WGLOBAL_CONSTRUCTORS_FLAG -Wglobal-constructors
+ TSAN_RTL_CFLAGS)
# FIXME: Add support for --sysroot=. compile flag:
-if("${CMAKE_BUILD_TYPE}" EQUAL "Release")
- set(TSAN_COMMON_DEFINITIONS DEBUG=0)
-else()
- set(TSAN_COMMON_DEFINITIONS DEBUG=1)
-endif()
-
set(TSAN_SOURCES
rtl/tsan_clock.cc
rtl/tsan_flags.cc
@@ -58,6 +55,7 @@
rtl/tsan_dense_alloc.h
rtl/tsan_fd.h
rtl/tsan_flags.h
+ rtl/tsan_flags.inc
rtl/tsan_ignoreset.h
rtl/tsan_interface_ann.h
rtl/tsan_interface.h
@@ -81,27 +79,42 @@
set(TSAN_RUNTIME_LIBRARIES)
add_custom_target(tsan)
# TSan is currently supported on 64-bit Linux only.
-if(CAN_TARGET_x86_64 AND UNIX AND NOT APPLE)
- set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
- # Pass ASM file directly to the C++ compiler.
- set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
- LANGUAGE C)
- set(arch "x86_64")
- add_compiler_rt_runtime(clang_rt.tsan-${arch} ${arch} STATIC
- SOURCES ${TSAN_SOURCES} ${TSAN_ASM_SOURCES}
- $<TARGET_OBJECTS:RTInterception.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
- $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
- CFLAGS ${TSAN_RTL_CFLAGS}
- DEFS ${TSAN_COMMON_DEFINITIONS})
- list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch})
- add_sanitizer_rt_symbols(clang_rt.tsan-${arch} rtl/tsan.syms.extra)
- add_dependencies(tsan clang_rt.tsan-${arch}
- clang_rt.tsan-${arch}-symbols)
+if(UNIX AND NOT APPLE)
+ foreach(arch ${TSAN_SUPPORTED_ARCH})
+ if(arch STREQUAL "x86_64")
+ set(TSAN_ASM_SOURCES rtl/tsan_rtl_amd64.S)
+ # Pass ASM file directly to the C++ compiler.
+ set_source_files_properties(${TSAN_ASM_SOURCES} PROPERTIES
+ LANGUAGE C)
+ # Sanity check for Go runtime.
+ set(BUILDGO_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/go/buildgo.sh)
+ add_custom_target(GotsanRuntimeCheck
+ COMMAND env "CC=${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_ARG1}"
+ IN_TMPDIR=1 SILENT=1 ${BUILDGO_SCRIPT}
+ DEPENDS clang_rt.tsan-${arch} ${BUILDGO_SCRIPT}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/go
+ COMMENT "Checking TSan Go runtime..."
+ VERBATIM)
+ else()
+ set(TSAN_ASM_SOURCES)
+ endif()
+ add_compiler_rt_runtime(clang_rt.tsan-${arch} ${arch} STATIC
+ SOURCES ${TSAN_SOURCES} ${TSAN_ASM_SOURCES}
+ $<TARGET_OBJECTS:RTInterception.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+ $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+ CFLAGS ${TSAN_RTL_CFLAGS}
+ DEFS ${TSAN_COMMON_DEFINITIONS})
+ list(APPEND TSAN_RUNTIME_LIBRARIES clang_rt.tsan-${arch})
+ add_sanitizer_rt_symbols(clang_rt.tsan-${arch} rtl/tsan.syms.extra)
+ add_dependencies(tsan clang_rt.tsan-${arch}
+ clang_rt.tsan-${arch}-symbols)
+ endforeach()
endif()
add_dependencies(compiler-rt tsan)
+
# Build libcxx instrumented with TSan.
if(TSAN_SUPPORTED_ARCH AND
COMPILER_RT_HAS_LIBCXX_SOURCES AND
diff --git a/lib/tsan/Makefile.mk b/lib/tsan/Makefile.mk
deleted file mode 100644
index 70fb610..0000000
--- a/lib/tsan/Makefile.mk
+++ /dev/null
@@ -1,18 +0,0 @@
-#===- lib/tsan/Makefile.mk ---------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := tsan
-SubDirs := rtl
-Sources :=
-ObjNames :=
-Dependencies :=
-
-Implementation := Generic
-
-TsanFunctions :=
diff --git a/lib/tsan/Makefile.old b/lib/tsan/Makefile.old
index b982e66..9e0693f 100644
--- a/lib/tsan/Makefile.old
+++ b/lib/tsan/Makefile.old
@@ -1,7 +1,7 @@
DEBUG=0
LDFLAGS=-ldl -lrt -lpthread -pie
CXXFLAGS = -std=c++11 -fPIE -fno-rtti -g -Wall -Werror \
- -DGTEST_HAS_RTTI=0 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+ -DGTEST_HAS_RTTI=0 -DSANITIZER_DEBUG=$(DEBUG)
CLANG=clang
FILECHECK=FileCheck
# Silence warnings that Clang produces for gtest code.
diff --git a/lib/tsan/check_analyze.sh b/lib/tsan/check_analyze.sh
index 08bfc7a..4b33393 100755
--- a/lib/tsan/check_analyze.sh
+++ b/lib/tsan/check_analyze.sh
@@ -8,17 +8,6 @@
PrintRes
-wmops="write1 \
- write2 \
- write4 \
- write8"
-rmops="read1 \
- read2 \
- read4 \
- read8"
-func="func_entry \
- func_exit"
-
check() {
res=$(PrintRes | egrep "$1 .* $2 $3; ")
if [ "$res" == "" ]; then
@@ -27,19 +16,25 @@
fi
}
-for f in $wmops; do
- check $f rsp 3
- check $f push 1
+for f in write1; do
+ check $f rsp 1
+ check $f push 2
+ check $f pop 2
+done
+
+for f in write2 write4 write8; do
+ check $f rsp 1
+ check $f push 3
+ check $f pop 3
+done
+
+for f in read1 read2 read4 read8; do
+ check $f rsp 1
+ check $f push 5
check $f pop 5
done
-for f in $rmops; do
- check $f rsp 3
- check $f push 1
- check $f pop 4
-done
-
-for f in $func; do
+for f in func_entry func_exit; do
check $f rsp 0
check $f push 0
check $f pop 0
diff --git a/lib/tsan/dd/CMakeLists.txt b/lib/tsan/dd/CMakeLists.txt
index aa7d63d..981c1fb 100644
--- a/lib/tsan/dd/CMakeLists.txt
+++ b/lib/tsan/dd/CMakeLists.txt
@@ -5,23 +5,11 @@
set(DD_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_no_rtti_flag(DD_CFLAGS)
-if("${CMAKE_BUILD_TYPE}" EQUAL "Release")
- set(DD_COMMON_DEFINITIONS DEBUG=0)
-else()
- set(DD_COMMON_DEFINITIONS DEBUG=1)
-endif()
-
-set(DD_DYNAMIC_DEFINITIONS DYNAMIC=1)
-
set(DD_SOURCES
dd_rtl.cc
dd_interceptors.cc
)
-set(DD_HEADERS
- dd_rtl.h
-)
-
set(DD_LINKLIBS)
append_list_if(COMPILER_RT_HAS_LIBDL dl DD_LINKLIBS)
append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread DD_LINKLIBS)
@@ -35,12 +23,11 @@
$<TARGET_OBJECTS:RTInterception.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
- CFLAGS ${DD_CFLAGS}
- DEFS ${DD_COMMON_DEFINITIONS})
+ CFLAGS ${DD_CFLAGS})
+ add_dependencies(dd clang_rt.dd-${arch})
add_compiler_rt_object_library(RTDD ${arch}
- SOURCES ${DD_SOURCES} CFLAGS ${DD_CFLAGS}
- DEFS ${DD_COMMON_DEFINITIONS} ${DD_DYNAMIC_DEFINITIONS})
+ SOURCES ${DD_SOURCES} CFLAGS ${DD_CFLAGS})
add_compiler_rt_runtime(clang_rt.dyndd-${arch} ${arch} SHARED
SOURCES $<TARGET_OBJECTS:RTDD.${arch}>
@@ -48,7 +35,7 @@
$<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
$<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>)
target_link_libraries(clang_rt.dyndd-${arch} ${DD_LINKLIBS})
+ add_dependencies(dd clang_rt.dyndd-${arch})
endif()
add_dependencies(compiler-rt dd)
-
diff --git a/lib/tsan/dd/dd_rtl.cc b/lib/tsan/dd/dd_rtl.cc
index 44de617..99b8ee5 100644
--- a/lib/tsan/dd/dd_rtl.cc
+++ b/lib/tsan/dd/dd_rtl.cc
@@ -11,6 +11,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -64,22 +65,27 @@
return CurrentStackTrace(thr, 3);
}
-void InitializeFlags(Flags *f, const char *env) {
- internal_memset(f, 0, sizeof(*f));
+static void InitializeFlags() {
+ Flags *f = flags();
// Default values.
f->second_deadlock_stack = false;
- SetCommonFlagsDefaults(f);
- // Override some common flags defaults.
- f->allow_addr2line = true;
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+ OverrideCommonFlags(cf);
+ }
// Override from command line.
- ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", "");
- ParseCommonFlagsFromString(f, env);
-
- // Copy back to common flags.
- *common_flags() = *f;
+ FlagParser parser;
+ RegisterFlag(&parser, "second_deadlock_stack", "", &f->second_deadlock_stack);
+ RegisterCommonFlags(&parser);
+ parser.ParseString(GetEnv("DSAN_OPTIONS"));
+ SetVerbosity(common_flags()->verbosity);
}
void Initialize() {
@@ -87,8 +93,7 @@
ctx = new(ctx_mem) Context();
InitializeInterceptors();
- InitializeFlags(flags(), GetEnv("DSAN_OPTIONS"));
- common_flags()->symbolize = true;
+ InitializeFlags();
ctx->dd = DDetector::Create(flags());
}
diff --git a/lib/tsan/dd/dd_rtl.h b/lib/tsan/dd/dd_rtl.h
index ec77766..bb1b202 100644
--- a/lib/tsan/dd/dd_rtl.h
+++ b/lib/tsan/dd/dd_rtl.h
@@ -18,8 +18,7 @@
namespace __dsan {
-struct Flags : CommonFlags, DDFlags {
-};
+typedef DDFlags Flags;
struct Mutex {
DDMutex dd;
diff --git a/lib/tsan/go/build.bat b/lib/tsan/go/build.bat
index 7156b7d..7d393dc 100644
--- a/lib/tsan/go/build.bat
+++ b/lib/tsan/go/build.bat
@@ -1,4 +1,4 @@
-type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc > gotsan.cc
+type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc > gotsan.cc
-gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 -Wno-error=attributes -Wno-attributes -Wno-format -DTSAN_DEBUG=0 -O3 -fomit-frame-pointer
+gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index dbb92f3..5ac6034 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -1,3 +1,5 @@
+#!/bin/sh
+
set -e
SRCS="
@@ -19,6 +21,7 @@
../../sanitizer_common/sanitizer_allocator.cc
../../sanitizer_common/sanitizer_common.cc
../../sanitizer_common/sanitizer_deadlock_detector2.cc
+ ../../sanitizer_common/sanitizer_flag_parser.cc
../../sanitizer_common/sanitizer_flags.cc
../../sanitizer_common/sanitizer_libc.cc
../../sanitizer_common/sanitizer_persistent_allocator.cc
@@ -27,13 +30,15 @@
../../sanitizer_common/sanitizer_thread_registry.cc
../../sanitizer_common/sanitizer_stackdepot.cc
../../sanitizer_common/sanitizer_stacktrace.cc
+ ../../sanitizer_common/sanitizer_symbolizer.cc
"
if [ "`uname -a | grep Linux`" != "" ]; then
SUFFIX="linux_amd64"
OSCFLAGS="-fPIC -ffreestanding -Wno-maybe-uninitialized -Wno-unused-const-variable -Werror -Wno-unknown-warning-option"
OSLDFLAGS="-lpthread -fPIC -fpie"
- SRCS+="
+ SRCS="
+ $SRCS
../rtl/tsan_platform_linux.cc
../../sanitizer_common/sanitizer_posix.cc
../../sanitizer_common/sanitizer_posix_libcdep.cc
@@ -46,7 +51,8 @@
SUFFIX="freebsd_amd64"
OSCFLAGS="-fno-strict-aliasing -fPIC -Werror"
OSLDFLAGS="-lpthread -fPIC -fpie"
- SRCS+="
+ SRCS="
+ $SRCS
../rtl/tsan_platform_linux.cc
../../sanitizer_common/sanitizer_posix.cc
../../sanitizer_common/sanitizer_posix_libcdep.cc
@@ -59,7 +65,8 @@
SUFFIX="darwin_amd64"
OSCFLAGS="-fPIC -Wno-unused-const-variable -Wno-unknown-warning-option"
OSLDFLAGS="-lpthread -fPIC -fpie"
- SRCS+="
+ SRCS="
+ $SRCS
../rtl/tsan_platform_mac.cc
../../sanitizer_common/sanitizer_mac.cc
../../sanitizer_common/sanitizer_posix.cc
@@ -70,7 +77,8 @@
SUFFIX="windows_amd64"
OSCFLAGS="-Wno-error=attributes -Wno-attributes -Wno-unused-const-variable -Wno-unknown-warning-option"
OSLDFLAGS=""
- SRCS+="
+ SRCS="
+ $SRCS
../rtl/tsan_platform_windows.cc
../../sanitizer_common/sanitizer_win.cc
"
@@ -79,24 +87,44 @@
exit 1
fi
-SRCS+=$ADD_SRCS
+CC=${CC:-gcc}
+IN_TMPDIR=${IN_TMPDIR:-0}
+SILENT=${SILENT:-0}
-rm -f gotsan.cc
-for F in $SRCS; do
- cat $F >> gotsan.cc
-done
-
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DTSAN_GO -DSANITIZER_GO -DTSAN_SHADOW_COUNT=4 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
-if [ "$DEBUG" == "" ]; then
- FLAGS+=" -DTSAN_DEBUG=0 -O3 -msse3 -fomit-frame-pointer"
+if [ $IN_TMPDIR != "0" ]; then
+ DIR=$(mktemp -qd /tmp/gotsan.XXXXXXXXXX)
+ cleanup() {
+ rm -rf $DIR
+ }
+ trap cleanup EXIT
else
- FLAGS+=" -DTSAN_DEBUG=1 -g"
+ DIR=.
fi
-CC=${CC:-gcc}
+SRCS="$SRCS $ADD_SRCS"
-echo $CC gotsan.cc -c -o race_$SUFFIX.syso $FLAGS $CFLAGS
-$CC gotsan.cc -c -o race_$SUFFIX.syso $FLAGS $CFLAGS
+rm -f $DIR/gotsan.cc
+for F in $SRCS; do
+ cat $F >> $DIR/gotsan.cc
+done
-$CC test.c race_$SUFFIX.syso -m64 -o test $OSLDFLAGS
-GORACE="exitcode=0 atexit_sleep_ms=0" ./test
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
+if [ "$DEBUG" = "" ]; then
+ FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer"
+else
+ FLAGS="$FLAGS -DSANITIZER_DEBUG=1 -g"
+fi
+
+if [ "$SILENT" != "1" ]; then
+ echo $CC gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS
+fi
+$CC $DIR/gotsan.cc -c -o $DIR/race_$SUFFIX.syso $FLAGS $CFLAGS
+
+$CC test.c $DIR/race_$SUFFIX.syso -m64 -o $DIR/test $OSLDFLAGS
+
+export GORACE="exitcode=0 atexit_sleep_ms=0"
+if [ "$SILENT" != "1" ]; then
+ $DIR/test
+else
+ $DIR/test 2>/dev/null
+fi
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index c1d401f..ea0beb7 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -28,13 +28,6 @@
return false;
}
-void *internal_start_thread(void(*func)(void*), void *arg) {
- return 0;
-}
-
-void internal_join_thread(void *th) {
-}
-
ReportLocation *SymbolizeData(uptr addr) {
return 0;
}
@@ -59,18 +52,19 @@
// Callback into Go.
static void (*symbolize_cb)(SymbolizeContext *ctx);
-ReportStack *SymbolizeCode(uptr addr) {
- ReportStack *s = ReportStack::New(addr);
+SymbolizedStack *SymbolizeCode(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
SymbolizeContext ctx;
internal_memset(&ctx, 0, sizeof(ctx));
ctx.pc = addr;
symbolize_cb(&ctx);
if (ctx.res) {
- s->info.module_offset = ctx.off;
- s->info.function = internal_strdup(ctx.func ? ctx.func : "??");
- s->info.file = internal_strdup(ctx.file ? ctx.file : "-");
- s->info.line = ctx.line;
- s->info.column = 0;
+ AddressInfo &info = s->info;
+ info.module_offset = ctx.off;
+ info.function = internal_strdup(ctx.func ? ctx.func : "??");
+ info.file = internal_strdup(ctx.file ? ctx.file : "-");
+ info.line = ctx.line;
+ info.column = 0;
}
return s;
}
diff --git a/lib/tsan/rtl/Makefile.mk b/lib/tsan/rtl/Makefile.mk
deleted file mode 100644
index 2687123..0000000
--- a/lib/tsan/rtl/Makefile.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-#===- lib/tsan/rtl/Makefile.mk -----------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := tsan
-SubDirs :=
-
-Sources := $(foreach file,$(wildcard $(Dir)/*.cc),$(notdir $(file)))
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-ObjNames := $(Sources:%.cc=%.o) $(AsmSources:%.S=%.o)
-
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
-Dependencies += $(wildcard $(Dir)/../../interception/*.h)
-Dependencies += $(wildcard $(Dir)/../../sanitizer_common/*.h)
-
-# Define a convenience variable for all the tsan functions.
-TsanFunctions += $(Sources:%.cc=%) $(AsmSources:%.S=%)
diff --git a/lib/tsan/rtl/Makefile.old b/lib/tsan/rtl/Makefile.old
index 79c761c..150b376 100644
--- a/lib/tsan/rtl/Makefile.old
+++ b/lib/tsan/rtl/Makefile.old
@@ -1,4 +1,4 @@
-CXXFLAGS = -std=c++11 -fPIE -g -Wall -Werror -fno-builtin -msse3 -DTSAN_DEBUG=$(DEBUG) -DSANITIZER_DEBUG=$(DEBUG)
+CXXFLAGS = -std=c++11 -fPIE -g -Wall -Werror -fno-builtin -msse3 -DSANITIZER_DEBUG=$(DEBUG)
CLANG=clang
ifeq ($(DEBUG), 0)
CXXFLAGS += -O3
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 1855f05..59e3de4 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -82,7 +82,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
@@ -104,8 +104,8 @@
}
void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(src->size_ <= kMaxTid);
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(src->size_, kMaxTid);
CPP_STAT_INC(StatClockAcquire);
// Check if it's empty -> no need to do anything.
@@ -215,8 +215,8 @@
}
void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
- DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->size_ <= kMaxTid);
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
CPP_STAT_INC(StatClockStore);
// Check if we need to resize dst.
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 0a356fb..910a483 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -18,13 +18,18 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_stat.h"
-#ifndef TSAN_DEBUG
-#define TSAN_DEBUG 0
-#endif // TSAN_DEBUG
+// Setup defaults for compile definitions.
+#ifndef TSAN_NO_HISTORY
+# define TSAN_NO_HISTORY 0
+#endif
+
+#ifndef TSAN_COLLECT_STATS
+# define TSAN_COLLECT_STATS 0
+#endif
namespace __tsan {
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
const bool kGoMode = true;
const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
@@ -39,23 +44,17 @@
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
+#ifndef SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
+#else
+const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
+#endif
const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
-#ifdef TSAN_SHADOW_COUNT
-# if TSAN_SHADOW_COUNT == 2 \
- || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8
-const uptr kShadowCnt = TSAN_SHADOW_COUNT;
-# else
-# error "TSAN_SHADOW_COUNT must be one of 2,4,8"
-# endif
-#else
// Count of shadow values in a shadow cell.
-#define TSAN_SHADOW_COUNT 4
const uptr kShadowCnt = 4;
-#endif
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
@@ -73,22 +72,16 @@
// Size of a single meta shadow value (u32).
const uptr kMetaShadowSize = 4;
-#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
+#if TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
const bool kCollectHistory = true;
#endif
-#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS
-const bool kCollectStats = true;
-#else
-const bool kCollectStats = false;
-#endif
-
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard to debug crashes.
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
void build_consistency_debug();
#else
void build_consistency_release();
@@ -100,18 +93,8 @@
void build_consistency_nostats();
#endif
-#if TSAN_SHADOW_COUNT == 1
-void build_consistency_shadow1();
-#elif TSAN_SHADOW_COUNT == 2
-void build_consistency_shadow2();
-#elif TSAN_SHADOW_COUNT == 4
-void build_consistency_shadow4();
-#else
-void build_consistency_shadow8();
-#endif
-
static inline void USED build_consistency() {
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
@@ -121,15 +104,6 @@
#else
build_consistency_nostats();
#endif
-#if TSAN_SHADOW_COUNT == 1
- build_consistency_shadow1();
-#elif TSAN_SHADOW_COUNT == 2
- build_consistency_shadow2();
-#elif TSAN_SHADOW_COUNT == 4
- build_consistency_shadow4();
-#else
- build_consistency_shadow8();
-#endif
}
template<typename T>
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 5dc331f..1e81ef3 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
@@ -33,80 +34,46 @@
}
#endif
-static void ParseFlags(Flags *f, const char *env) {
- ParseFlag(env, &f->enable_annotations, "enable_annotations", "");
- ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", "");
- ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", "");
- ParseFlag(env, &f->report_bugs, "report_bugs", "");
- ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", "");
- ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", "");
- ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", "");
- ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", "");
- ParseFlag(env, &f->report_atomic_races, "report_atomic_races", "");
- ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", "");
- ParseFlag(env, &f->print_benign, "print_benign", "");
- ParseFlag(env, &f->exitcode, "exitcode", "");
- ParseFlag(env, &f->halt_on_error, "halt_on_error", "");
- ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", "");
- ParseFlag(env, &f->profile_memory, "profile_memory", "");
- ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", "");
- ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", "");
- ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", "");
- ParseFlag(env, &f->stop_on_start, "stop_on_start", "");
- ParseFlag(env, &f->running_on_valgrind, "running_on_valgrind", "");
- ParseFlag(env, &f->history_size, "history_size", "");
- ParseFlag(env, &f->io_sync, "io_sync", "");
- ParseFlag(env, &f->die_after_fork, "die_after_fork", "");
-
+void Flags::SetDefaults() {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
// DDFlags
- ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", "");
+ second_deadlock_stack = false;
+}
+
+void RegisterTsanFlags(FlagParser *parser, Flags *f) {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
}
void InitializeFlags(Flags *f, const char *env) {
- internal_memset(f, 0, sizeof(*f));
+ FlagParser parser;
+ RegisterTsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
- // Default values.
- f->enable_annotations = true;
- f->suppress_equal_stacks = true;
- f->suppress_equal_addresses = true;
- f->report_bugs = true;
- f->report_thread_leaks = true;
- f->report_destroy_locked = true;
- f->report_mutex_bugs = true;
- f->report_signal_unsafe = true;
- f->report_atomic_races = true;
- f->force_seq_cst_atomics = false;
- f->print_benign = false;
- f->exitcode = 66;
- f->halt_on_error = false;
- f->atexit_sleep_ms = 1000;
- f->profile_memory = "";
- f->flush_memory_ms = 0;
- f->flush_symbolizer_ms = 5000;
- f->memory_limit_mb = 0;
- f->stop_on_start = false;
- f->running_on_valgrind = false;
- f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
- f->io_sync = 1;
- f->die_after_fork = true;
+ f->SetDefaults();
- // DDFlags
- f->second_deadlock_stack = false;
-
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- // Override some common flags defaults.
- cf->allow_addr2line = true;
- cf->detect_deadlocks = true;
- cf->print_suppressions = false;
- cf->stack_trace_format = " #%n %f %S %M";
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+#ifndef SANITIZER_GO
+ cf.detect_deadlocks = true;
+#endif
+ cf.print_suppressions = false;
+ cf.stack_trace_format = " #%n %f %S %M";
+ OverrideCommonFlags(cf);
+ }
// Let a frontend override.
- ParseFlags(f, __tsan_default_options());
- ParseCommonFlagsFromString(cf, __tsan_default_options());
+ parser.ParseString(__tsan_default_options());
// Override from command line.
- ParseFlags(f, env);
- ParseCommonFlagsFromString(cf, env);
+ parser.ParseString(env);
// Sanity check.
if (!f->report_bugs) {
@@ -115,7 +82,11 @@
f->report_signal_unsafe = false;
}
- if (cf->help) PrintFlagDescriptions();
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
diff --git a/lib/tsan/rtl/tsan_flags.h b/lib/tsan/rtl/tsan_flags.h
index 621ca13..e2f6b3c 100644
--- a/lib/tsan/rtl/tsan_flags.h
+++ b/lib/tsan/rtl/tsan_flags.h
@@ -20,65 +20,12 @@
namespace __tsan {
struct Flags : DDFlags {
- // Enable dynamic annotations, otherwise they are no-ops.
- bool enable_annotations;
- // Suppress a race report if we've already output another race report
- // with the same stack.
- bool suppress_equal_stacks;
- // Suppress a race report if we've already output another race report
- // on the same address.
- bool suppress_equal_addresses;
- // Turns off bug reporting entirely (useful for benchmarking).
- bool report_bugs;
- // Report thread leaks at exit?
- bool report_thread_leaks;
- // Report destruction of a locked mutex?
- bool report_destroy_locked;
- // Report incorrect usages of mutexes and mutex annotations?
- bool report_mutex_bugs;
- // Report violations of async signal-safety
- // (e.g. malloc() call from a signal handler).
- bool report_signal_unsafe;
- // Report races between atomic and plain memory accesses.
- bool report_atomic_races;
- // If set, all atomics are effectively sequentially consistent (seq_cst),
- // regardless of what user actually specified.
- bool force_seq_cst_atomics;
- // Print matched "benign" races at exit.
- bool print_benign;
- // Override exit status if something was reported.
- int exitcode;
- // Exit after first reported error.
- bool halt_on_error;
- // Sleep in main thread before exiting for that many ms
- // (useful to catch "at exit" races).
- int atexit_sleep_ms;
- // If set, periodically write memory profile to that file.
- const char *profile_memory;
- // Flush shadow memory every X ms.
- int flush_memory_ms;
- // Flush symbolizer caches every X ms.
- int flush_symbolizer_ms;
- // Resident memory limit in MB to aim at.
- // If the process consumes more memory, then TSan will flush shadow memory.
- int memory_limit_mb;
- // Stops on start until __tsan_resume() is called (for debugging).
- bool stop_on_start;
- // Controls whether RunningOnValgrind() returns true or false.
- bool running_on_valgrind;
- // Per-thread history size, controls how many previous memory accesses
- // are remembered per thread. Possible values are [0..7].
- // history_size=0 amounts to 32K memory accesses. Each next value doubles
- // the amount of memory accesses, up to history_size=7 that amounts to
- // 4M memory accesses. The default value is 2 (128K memory accesses).
- int history_size;
- // Controls level of synchronization implied by IO operations.
- // 0 - no synchronization
- // 1 - reasonable level of synchronization (write->read)
- // 2 - global synchronization of all IO operations
- int io_sync;
- // Die after multi-threaded fork if the child creates new threads.
- bool die_after_fork;
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+
+ void SetDefaults();
+ void ParseFromString(const char *str);
};
Flags *flags();
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
new file mode 100644
index 0000000..e499468
--- /dev/null
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -0,0 +1,79 @@
+//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// TSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FLAG
+# error "Define TSAN_FLAG prior to including this file!"
+#endif
+
+// TSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+TSAN_FLAG(bool, enable_annotations, true,
+ "Enable dynamic annotations, otherwise they are no-ops.")
+// Suppress a race report if we've already output another race report
+// with the same stack.
+TSAN_FLAG(bool, suppress_equal_stacks, true,
+ "Suppress a race report if we've already output another race report "
+ "with the same stack.")
+TSAN_FLAG(bool, suppress_equal_addresses, true,
+ "Suppress a race report if we've already output another race report "
+ "on the same address.")
+
+TSAN_FLAG(bool, report_bugs, true,
+ "Turns off bug reporting entirely (useful for benchmarking).")
+TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
+TSAN_FLAG(bool, report_destroy_locked, true,
+ "Report destruction of a locked mutex?")
+TSAN_FLAG(bool, report_mutex_bugs, true,
+ "Report incorrect usages of mutexes and mutex annotations?")
+TSAN_FLAG(bool, report_signal_unsafe, true,
+ "Report violations of async signal-safety "
+ "(e.g. malloc() call from a signal handler).")
+TSAN_FLAG(bool, report_atomic_races, true,
+ "Report races between atomic and plain memory accesses.")
+TSAN_FLAG(
+ bool, force_seq_cst_atomics, false,
+ "If set, all atomics are effectively sequentially consistent (seq_cst), "
+ "regardless of what user actually specified.")
+TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(int, exitcode, 66, "Override exit status if something was reported.")
+TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
+TSAN_FLAG(int, atexit_sleep_ms, 1000,
+ "Sleep in main thread before exiting for that many ms "
+ "(useful to catch \"at exit\" races).")
+TSAN_FLAG(const char *, profile_memory, "",
+ "If set, periodically write memory profile to that file.")
+TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
+TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
+TSAN_FLAG(
+ int, memory_limit_mb, 0,
+ "Resident memory limit in MB to aim at."
+ "If the process consumes more memory, then TSan will flush shadow memory.")
+TSAN_FLAG(bool, stop_on_start, false,
+ "Stops on start until __tsan_resume() is called (for debugging).")
+TSAN_FLAG(bool, running_on_valgrind, false,
+ "Controls whether RunningOnValgrind() returns true or false.")
+TSAN_FLAG(
+ int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ "Per-thread history size, controls how many previous memory accesses "
+ "are remembered per thread. Possible values are [0..7]. "
+ "history_size=0 amounts to 32K memory accesses. Each next value doubles "
+ "the amount of memory accesses, up to history_size=7 that amounts to "
+ "4M memory accesses. The default value is 2 (128K memory accesses).")
+TSAN_FLAG(int, io_sync, 1,
+ "Controls level of synchronization implied by IO operations. "
+ "0 - no synchronization "
+ "1 - reasonable level of synchronization (write->read)"
+ "2 - global synchronization of all IO operations.")
+TSAN_FLAG(bool, die_after_fork, true,
+ "Die after multi-threaded fork if the child creates new threads.")
+TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 7889942..31ff7d5 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -39,17 +39,27 @@
#define stderr __stderrp
#endif
+#ifdef __mips__
+const int kSigCount = 129;
+#else
const int kSigCount = 65;
+#endif
struct my_siginfo_t {
// The size is determined by looking at sizeof of real siginfo_t on linux.
u64 opaque[128 / sizeof(u64)];
};
+#ifdef __mips__
+struct ucontext_t {
+ u64 opaque[768 / sizeof(u64) + 1];
+};
+#else
struct ucontext_t {
// The size is determined by looking at sizeof of real ucontext_t on linux.
u64 opaque[936 / sizeof(u64) + 1];
};
+#endif
extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
@@ -72,6 +82,7 @@
extern "C" void *__libc_calloc(uptr size, uptr n);
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
+extern "C" int dirfd(void *dirp);
#if !SANITIZER_FREEBSD
extern "C" int mallopt(int param, int value);
#endif
@@ -88,8 +99,13 @@
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
+#ifdef __mips__
+const int SIGBUS = 10;
+const int SIGSYS = 12;
+#else
const int SIGBUS = 7;
const int SIGSYS = 31;
+#endif
void *const MAP_FAILED = (void*)-1;
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
const int MAP_FIXED = 0x10;
@@ -101,21 +117,27 @@
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-typedef void (*sighandler_t)(int sig);
-
#define errno (*__errno_location())
+typedef void (*sighandler_t)(int sig);
+typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
+
struct sigaction_t {
+#ifdef __mips__
+ u32 sa_flags;
+#endif
union {
sighandler_t sa_handler;
- void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx);
+ sigactionhandler_t sa_sigaction;
};
#if SANITIZER_FREEBSD
int sa_flags;
__sanitizer_sigset_t sa_mask;
#else
__sanitizer_sigset_t sa_mask;
+#ifndef __mips__
int sa_flags;
+#endif
void (*sa_restorer)();
#endif
};
@@ -123,8 +145,13 @@
const sighandler_t SIG_DFL = (sighandler_t)0;
const sighandler_t SIG_IGN = (sighandler_t)1;
const sighandler_t SIG_ERR = (sighandler_t)-1;
+#ifdef __mips__
+const int SA_SIGINFO = 8;
+const int SIG_SETMASK = 3;
+#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
+#endif
namespace std {
struct nothrow_t {};
@@ -155,7 +182,13 @@
}
void InitializeLibIgnore() {
- libignore()->Init(*SuppressionContext::Get());
+ const SuppressionContext &supp = *Suppressions();
+ const uptr n = supp.SuppressionCount();
+ for (uptr i = 0; i < n; i++) {
+ const Suppression *s = supp.SuppressionAt(i);
+ if (0 == internal_strcmp(s->type, kSuppressionLib))
+ libignore()->AddIgnoredLibrary(s->templ);
+ }
libignore()->OnLibraryLoaded(0);
}
@@ -505,14 +538,10 @@
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (cur_thread()->in_symbolizer)
return __libc_calloc(size, n);
- if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n))
- return AllocatorReturnNull();
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
- p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
+ p = user_calloc(thr, pc, size, n);
}
invoke_malloc_hook(p, n * size);
return p;
@@ -952,6 +981,8 @@
return res;
}
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
int tid = ThreadTid(thr, pc, (uptr)th);
@@ -1800,9 +1831,16 @@
return REAL(fwrite)(p, size, nmemb, f);
}
+static void FlushStreams() {
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
+}
+
TSAN_INTERCEPTOR(void, abort, int fake) {
SCOPED_TSAN_INTERCEPTOR(abort, fake);
- REAL(fflush)(0);
+ FlushStreams();
REAL(abort)(fake);
}
@@ -1819,12 +1857,11 @@
return res;
}
-TSAN_INTERCEPTOR(void*, opendir, char *path) {
- SCOPED_TSAN_INTERCEPTOR(opendir, path);
- void *res = REAL(opendir)(path);
- if (res != 0)
- Acquire(thr, pc, Dir2addr(path));
- return res;
+TSAN_INTERCEPTOR(int, closedir, void *dirp) {
+ SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ return REAL(closedir)(dirp);
}
#if !SANITIZER_FREEBSD
@@ -1868,15 +1905,18 @@
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
errno = 99;
- // Need to remember pc before the call, because the handler can reset it.
- uptr pc = sigact ?
+ // This code races with sigaction. Be careful to not read sa_sigaction twice.
+ // Also need to remember pc for reporting before the call,
+ // because the handler can reset it.
+ volatile uptr pc = sigact ?
(uptr)sigactions[sig].sa_sigaction :
(uptr)sigactions[sig].sa_handler;
- pc += 1; // return address is expected, OutputReport() will undo this
- if (sigact)
- sigactions[sig].sa_sigaction(sig, info, uctx);
- else
- sigactions[sig].sa_handler(sig);
+ if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) {
+ if (sigact)
+ ((sigactionhandler_t)pc)(sig, info, uctx);
+ else
+ ((sighandler_t)pc)(sig);
+ }
// We do not detect errno spoiling for SIGTERM,
// because some SIGTERM handlers do spoil errno but reraise SIGTERM,
// tsan reports false positive in such case.
@@ -1886,7 +1926,9 @@
// signal; and it looks too fragile to intercept all ways to reraise a signal.
if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
VarSizeStackTrace stack;
- ObtainCurrentStack(thr, pc, &stack);
+    // StackTrace::GetNextInstructionPc(pc) is used because return address is
+ // expected, OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
@@ -1912,11 +1954,8 @@
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
signal->armed = false;
- if (sigactions[sig].sa_handler != SIG_DFL
- && sigactions[sig].sa_handler != SIG_IGN) {
- CallUserSignalHandler(thr, false, true, signal->sigaction,
- sig, &signal->siginfo, &signal->ctx);
- }
+ CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
+ &signal->siginfo, &signal->ctx);
}
}
pthread_sigmask(SIG_SETMASK, &oldset, 0);
@@ -1998,7 +2037,19 @@
internal_memcpy(old, &sigactions[sig], sizeof(*old));
if (act == 0)
return 0;
- internal_memcpy(&sigactions[sig], act, sizeof(*act));
+ // Copy act into sigactions[sig].
+  // Can't use struct copy, because the compiler can emit a call to memcpy.
+  // Can't use internal_memcpy, because it copies byte-by-byte,
+  // and the signal handler reads sa_handler concurrently. So it can read
+  // some bytes from the old value and some bytes from the new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler;
+ sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
sigaction_t newact;
internal_memcpy(&newact, act, sizeof(newact));
REAL(sigfillset)(&newact.sa_mask);
@@ -2131,7 +2182,7 @@
static int OnExit(ThreadState *thr) {
int status = Finalize(thr);
- REAL(fflush)(0);
+ FlushStreams();
return status;
}
@@ -2164,6 +2215,16 @@
#undef SANITIZER_INTERCEPT_FGETPWENT
#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+// __tls_get_addr can be called with mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// There are two potential issues:
+// 1. Sanitizer code contains a MOVDQA spill (it does not seem to be the case
+// right now), or 2. ProcessPendingSignal calls a user handler which contains
+// a MOVDQA spill (this happens right now).
+// Since the interceptor only initializes memory for msan, the simplest solution
+// is to disable the interceptor in tsan (other sanitizers do not call
+// signal handlers from COMMON_INTERCEPTOR_ENTER).
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
@@ -2202,12 +2263,15 @@
if (fd >= 0) FdClose(thr, pc, fd); \
}
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
libignore()->OnLibraryLoaded(filename)
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
libignore()->OnLibraryUnloaded()
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
@@ -2367,10 +2431,7 @@
ThreadState *thr = cur_thread();
int status = Finalize(thr);
// Make sure the output is not lost.
- // Flushing all the streams here may freeze the process if a child thread is
- // performing file stream operations at the same time.
- REAL(fflush)(stdout);
- REAL(fflush)(stderr);
+ FlushStreams();
if (status)
REAL(_exit)(status);
}
@@ -2526,7 +2587,7 @@
TSAN_INTERCEPT(abort);
TSAN_INTERCEPT(puts);
TSAN_INTERCEPT(rmdir);
- TSAN_INTERCEPT(opendir);
+ TSAN_INTERCEPT(closedir);
TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
@@ -2565,19 +2626,4 @@
FdInit();
}
-void *internal_start_thread(void(*func)(void *arg), void *arg) {
- // Start the thread with signals blocked, otherwise it can steal user signals.
- __sanitizer_sigset_t set, old;
- internal_sigfillset(&set);
- internal_sigprocmask(SIG_SETMASK, &set, &old);
- void *th;
- REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg);
- internal_sigprocmask(SIG_SETMASK, &old, 0);
- return th;
-}
-
-void internal_join_thread(void *th) {
- REAL(pthread_join)(th, 0);
-}
-
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_interface.cc b/lib/tsan/rtl/tsan_interface.cc
index 9de3808..9bc9a69 100644
--- a/lib/tsan/rtl/tsan_interface.cc
+++ b/lib/tsan/rtl/tsan_interface.cc
@@ -38,57 +38,80 @@
MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
-u16 __tsan_unaligned_read2(const uu16 *addr) {
+// __tsan_unaligned_read/write calls are emitted by compiler.
+
+void __tsan_unaligned_read2(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
- return *addr;
}
-u32 __tsan_unaligned_read4(const uu32 *addr) {
+void __tsan_unaligned_read4(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
- return *addr;
}
-u64 __tsan_unaligned_read8(const uu64 *addr) {
+void __tsan_unaligned_read8(const void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
- return *addr;
}
-void __tsan_unaligned_write2(uu16 *addr, u16 v) {
+void __tsan_unaligned_read16(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+}
+
+void __tsan_unaligned_write2(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
- *addr = v;
}
-void __tsan_unaligned_write4(uu32 *addr, u32 v) {
+void __tsan_unaligned_write4(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
- *addr = v;
}
-void __tsan_unaligned_write8(uu64 *addr, u64 v) {
+void __tsan_unaligned_write8(void *addr) {
UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
- *addr = v;
}
+void __tsan_unaligned_write16(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+}
+
+// __sanitizer_unaligned_load/store are for user instrumentation.
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-uint16_t __sanitizer_unaligned_load16(void *addr)
- ALIAS("__tsan_unaligned_read2");
-SANITIZER_INTERFACE_ATTRIBUTE
-uint32_t __sanitizer_unaligned_load32(void *addr)
- ALIAS("__tsan_unaligned_read4");
-SANITIZER_INTERFACE_ATTRIBUTE
-uint64_t __sanitizer_unaligned_load64(void *addr)
- ALIAS("__tsan_unaligned_read8");
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store16(void *addr, uint16_t v)
- ALIAS("__tsan_unaligned_write2");
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store32(void *addr, uint32_t v)
- ALIAS("__tsan_unaligned_write4");
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store64(void *addr, uint64_t v)
- ALIAS("__tsan_unaligned_write8");
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ __tsan_unaligned_write2(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ __tsan_unaligned_write4(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ __tsan_unaligned_write8(addr);
+ *addr = v;
+}
+} // extern "C"
+
void __tsan_acquire(void *addr) {
Acquire(cur_thread(), CALLERPC, (uptr)addr);
}
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index 7045069..a05e6f0 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -41,12 +41,15 @@
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u16 __tsan_unaligned_read2(const uu16 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u32 __tsan_unaligned_read4(const uu32 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE u64 __tsan_unaligned_read8(const uu64 *addr);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(uu16 *addr, u16 v);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(uu32 *addr, u32 v);
-SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(uu64 *addr, u64 v);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index ceb32bd..2703199 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -32,15 +32,15 @@
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302))
+#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif
-#ifndef TSAN_GO
+#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
@@ -125,7 +125,8 @@
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+ && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
a128 cmp = *v;
@@ -198,7 +199,7 @@
// this leads to false negatives only in very obscure cases.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -234,7 +235,7 @@
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -264,7 +265,7 @@
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -456,7 +457,7 @@
return c;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -468,7 +469,7 @@
#endif
// Interface functions follow.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// C/C++
@@ -867,7 +868,7 @@
}
} // extern "C"
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
// Go
@@ -950,4 +951,4 @@
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef TSAN_GO
+#endif // #ifndef SANITIZER_GO
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 8615349..0aea63d 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -219,3 +219,33 @@
return MutexUnlock(thr, pc, addr, true);
}
+
+void __tsan_java_acquire(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Acquire(thr, caller_pc, addr);
+}
+
+void __tsan_java_release(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Release(thr, caller_pc, addr);
+}
+
+void __tsan_java_release_store(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_release_store);
+ DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ ReleaseStore(thr, caller_pc, addr);
+}
diff --git a/lib/tsan/rtl/tsan_interface_java.h b/lib/tsan/rtl/tsan_interface_java.h
index 1f793df..30153a1 100644
--- a/lib/tsan/rtl/tsan_interface_java.h
+++ b/lib/tsan/rtl/tsan_interface_java.h
@@ -79,6 +79,14 @@
// the same recursion level.
int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
+// Raw acquire/release primitives.
+// Can be used to establish happens-before edges on volatile/final fields,
+// in atomic operations, etc. release_store is the same as release, but it
+// breaks release sequence on addr (see C++ standard 1.10/7 for details).
+void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE;
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 285bdb3..ebb3f77 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -45,7 +45,7 @@
}
void InitializeAllocator() {
- allocator()->Init();
+ allocator()->Init(common_flags()->allocator_may_return_null);
}
void AllocatorThreadStart(ThreadState *thr) {
@@ -78,7 +78,7 @@
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return AllocatorReturnNull();
+ return allocator()->ReturnNullOrDie();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -89,6 +89,15 @@
return p;
}
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (CallocShouldReturnNullDueToOverflow(size, n))
+ return allocator()->ReturnNullOrDie();
+ void *p = user_alloc(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return p;
+}
+
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 7d41fa8..5ff956d 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -27,6 +27,7 @@
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
uptr align = kDefaultAlignment, bool signal = true);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
index 55d6e18..dc5a462 100644
--- a/lib/tsan/rtl/tsan_mutex.cc
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -25,7 +25,7 @@
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
@@ -47,7 +47,7 @@
#endif
void InitializeMutex() {
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
// Build the "can lock" adjacency matrix.
// If [i][j]==true, then one can lock mutex j while under mutex i.
const int N = MutexTypeCount;
@@ -128,7 +128,7 @@
// Rely on zero initialization because some mutexes can be locked before ctor.
}
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
@@ -170,7 +170,7 @@
#endif
void CheckNoLocks(ThreadState *thr) {
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
thr->internal_deadlock_detector.CheckNoLocks();
#endif
}
@@ -208,7 +208,7 @@
Mutex::Mutex(MutexType type, StatType stat_type) {
CHECK_GT(type, MutexTypeInvalid);
CHECK_LT(type, MutexTypeCount);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
type_ = type;
#endif
#if TSAN_COLLECT_STATS
@@ -222,7 +222,7 @@
}
void Mutex::Lock() {
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
@@ -234,7 +234,7 @@
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS && !TSAN_GO
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -247,13 +247,13 @@
uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
@@ -262,7 +262,7 @@
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS && !TSAN_GO
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -275,7 +275,7 @@
(void)prev;
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
+#if SANITIZER_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h
index 7bb1c48..88fad57 100644
--- a/lib/tsan/rtl/tsan_mutex.h
+++ b/lib/tsan/rtl/tsan_mutex.h
@@ -52,7 +52,7 @@
private:
atomic_uintptr_t state_;
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
MutexType type_;
#endif
#if TSAN_COLLECT_STATS
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 541d181..68f0ec2 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -43,7 +43,7 @@
}
private:
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -55,7 +55,7 @@
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 45f8631..135e160 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -24,10 +24,11 @@
namespace __tsan {
-#if !defined(TSAN_GO)
+#if !defined(SANITIZER_GO)
+#if defined(__x86_64__)
/*
-C/C++ on linux and freebsd
+C/C++ on linux/x86_64 and freebsd/x86_64
0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
0100 0000 0000 - 0200 0000 0000: -
0200 0000 0000 - 1000 0000 0000: shadow
@@ -40,7 +41,6 @@
7e00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
-
const uptr kMetaShadowBeg = 0x300000000000ull;
const uptr kMetaShadowEnd = 0x400000000000ull;
const uptr kTraceMemBeg = 0x600000000000ull;
@@ -55,6 +55,38 @@
const uptr kHiAppMemEnd = 0x800000000000ull;
const uptr kAppMemMsk = 0x7c0000000000ull;
const uptr kAppMemXor = 0x020000000000ull;
+const uptr kVdsoBeg = 0xf000000000000000ull;
+#elif defined(__mips64)
+/*
+C/C++ on linux/mips64
+0100 0000 00 - 0200 0000 00: main binary
+0200 0000 00 - 1400 0000 00: -
+1400 0000 00 - 2400 0000 00: shadow
+2400 0000 00 - 3000 0000 00: -
+3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects)
+4000 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - fe00 0000 00: -
+fe00 0000 00 - ff00 0000 00: heap
+ff00 0000 00 - ff80 0000 00: -
+ff80 0000 00 - ffff ffff ff: modules and main thread stack
+*/
+const uptr kMetaShadowBeg = 0x3000000000ull;
+const uptr kMetaShadowEnd = 0x4000000000ull;
+const uptr kTraceMemBeg = 0x6000000000ull;
+const uptr kTraceMemEnd = 0x6200000000ull;
+const uptr kShadowBeg = 0x1400000000ull;
+const uptr kShadowEnd = 0x2400000000ull;
+const uptr kHeapMemBeg = 0xfe00000000ull;
+const uptr kHeapMemEnd = 0xff00000000ull;
+const uptr kLoAppMemBeg = 0x0100000000ull;
+const uptr kLoAppMemEnd = 0x0200000000ull;
+const uptr kHiAppMemBeg = 0xff80000000ull;
+const uptr kHiAppMemEnd = 0xffffffffffull;
+const uptr kAppMemMsk = 0xfc00000000ull;
+const uptr kAppMemXor = 0x0400000000ull;
+const uptr kVdsoBeg = 0xfffff00000ull;
+#endif
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
@@ -102,7 +134,7 @@
kHeapMemBeg, kHeapMemEnd,
};
-#elif defined(TSAN_GO) && !SANITIZER_WINDOWS
+#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -164,15 +196,15 @@
kAppMemBeg, kAppMemEnd,
};
-#elif defined(TSAN_GO) && SANITIZER_WINDOWS
+#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0380 0000 0000: shadow
-0380 0000 0000 - 0560 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0560 0000 0000: -
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
@@ -183,7 +215,7 @@
const uptr kTraceMemBeg = 0x056000000000ull;
const uptr kTraceMemEnd = 0x076000000000ull;
const uptr kShadowBeg = 0x010000000000ull;
-const uptr kShadowEnd = 0x038000000000ull;
+const uptr kShadowEnd = 0x050000000000ull;
const uptr kAppMemBeg = 0x000000001000ull;
const uptr kAppMemEnd = 0x00e000000000ull;
@@ -205,21 +237,21 @@
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
DCHECK(IsAppMem(x));
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg;
}
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
DCHECK(IsAppMem(x));
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) | kMetaShadowEnd);
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
}
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
CHECK(IsShadowMem(s));
// FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
- return (x & ~kShadowBeg) / kShadowCnt;
+ return (s - kShadowBeg) / kShadowCnt;
}
static USED uptr UserRegions[] = {
@@ -251,10 +283,6 @@
void InitializePlatform();
void FlushShadowMemory();
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
-uptr GetRSS();
-
-void *internal_start_thread(void(*func)(void*), void *arg);
-void internal_join_thread(void *th);
// Says whether the addr relates to a global var.
// Guesses with high probability, may yield both false positives and negatives.
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index 46b648c..659e8d8 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -66,8 +66,6 @@
static uptr g_data_start;
static uptr g_data_end;
-const uptr kPageSize = 4096;
-
enum {
MemTotal = 0,
MemShadow = 1,
@@ -87,7 +85,7 @@
mem[MemShadow] += rss;
else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
mem[MemMeta] += rss;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
else if (p >= kHeapMemBeg && p < kHeapMemEnd)
mem[MemHeap] += rss;
else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
@@ -118,33 +116,6 @@
nlive, nthread);
}
-uptr GetRSS() {
- uptr fd = OpenFile("/proc/self/statm", false);
- if ((sptr)fd < 0)
- return 0;
- char buf[64];
- uptr len = internal_read(fd, buf, sizeof(buf) - 1);
- internal_close(fd);
- if ((sptr)len <= 0)
- return 0;
- buf[len] = 0;
- // The format of the file is:
- // 1084 89 69 11 0 79 0
- // We need the second number which is RSS in 4K units.
- char *pos = buf;
- // Skip the first number.
- while (*pos >= '0' && *pos <= '9')
- pos++;
- // Skip whitespaces.
- while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
- pos++;
- // Read the number.
- uptr rss = 0;
- while (*pos >= '0' && *pos <= '9')
- rss = rss * 10 + *pos++ - '0';
- return rss * 4096;
-}
-
#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
@@ -159,7 +130,7 @@
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ProtectRange(uptr beg, uptr end) {
CHECK_LE(beg, end);
if (beg == end)
@@ -200,7 +171,7 @@
*p = kShadowRodata;
internal_write(fd, marker.data(), marker.size());
// Map the file into memory.
- uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE,
+ uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
if (internal_iserror(page)) {
internal_close(fd);
@@ -242,10 +213,18 @@
// Frequently a thread uses only a small part of stack and similarly
// a program uses a small part of large mmap. On some programs
// we see 20% memory usage reduction without huge pages for this range.
-#ifdef MADV_NOHUGEPAGE
- madvise((void*)MemToShadow(0x7f0000000000ULL),
- 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
+ // FIXME: don't use constants here.
+#if defined(__x86_64__)
+ const uptr kMadviseRangeBeg = 0x7f0000000000ull;
+ const uptr kMadviseRangeSize = 0x010000000000ull;
+#elif defined(__mips64)
+ const uptr kMadviseRangeBeg = 0xff00000000ull;
+ const uptr kMadviseRangeSize = 0x0100000000ull;
#endif
+ NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
+ kMadviseRangeSize * kShadowMultiplier);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
kShadowBeg, kShadowEnd,
(kShadowEnd - kShadowBeg) >> 30);
@@ -259,6 +238,8 @@
"to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
Die();
}
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(meta, meta_size);
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
meta, meta + meta_size, meta_size >> 30);
@@ -313,9 +294,9 @@
if (IsAppMem(p))
continue;
if (p >= kHeapMemEnd &&
- p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
+ p < HeapEnd())
continue;
- if (p >= 0xf000000000000000ull) // vdso
+ if (p >= kVdsoBeg) // vdso
break;
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
Die();
@@ -324,10 +305,13 @@
ProtectRange(kLoAppMemEnd, kShadowBeg);
ProtectRange(kShadowEnd, kMetaShadowBeg);
ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ // Memory for traces is mapped lazily in MapThreadTrace.
+  // Protect the whole range for now, so that the user does not map anything here.
+ ProtectRange(kTraceMemBeg, kTraceMemEnd);
ProtectRange(kTraceMemEnd, kHeapMemBeg);
- ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
+ ProtectRange(HeapEnd(), kHiAppMemBeg);
}
-#endif // #ifndef TSAN_GO
+#endif // #ifndef SANITIZER_GO
void InitializePlatform() {
DisableCoreDumperIfNecessary();
@@ -361,7 +345,7 @@
ReExec();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
CheckAndProtect();
InitTlsSize();
InitDataSeg();
@@ -372,7 +356,7 @@
return g_data_start && addr >= g_data_start && addr < g_data_end;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index fd71eb3..63f1748 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -50,11 +50,7 @@
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-uptr GetRSS() {
- return 0;
-}
-
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void InitializeShadowMemory() {
uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
kShadowEnd - kShadowBeg);
@@ -64,6 +60,8 @@
"to link with -pie.\n");
Die();
}
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
DPrintf("kShadow %zx-%zx (%zuGB)\n",
kShadowBeg, kShadowEnd,
(kShadowEnd - kShadowBeg) >> 30);
@@ -77,7 +75,7 @@
DisableCoreDumperIfNecessary();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime,
void(*cleanup)(void *arg), void *arg) {
diff --git a/lib/tsan/rtl/tsan_platform_windows.cc b/lib/tsan/rtl/tsan_platform_windows.cc
index ae9f050..cfbe77d 100644
--- a/lib/tsan/rtl/tsan_platform_windows.cc
+++ b/lib/tsan/rtl/tsan_platform_windows.cc
@@ -31,10 +31,6 @@
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-uptr GetRSS() {
- return 0;
-}
-
void InitializePlatform() {
}
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index f4a1ddb..7e69cb4 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -19,13 +19,11 @@
namespace __tsan {
-ReportStack::ReportStack() : next(nullptr), info(), suppressable(false) {}
+ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
-ReportStack *ReportStack::New(uptr addr) {
+ReportStack *ReportStack::New() {
void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
- ReportStack *res = new(mem) ReportStack();
- res->info.address = addr;
- return res;
+ return new(mem) ReportStack();
}
ReportLocation::ReportLocation(ReportLocationType type)
@@ -73,7 +71,7 @@
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -114,13 +112,14 @@
}
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
return;
}
- for (int i = 0; ent && ent->info.address; ent = ent->next, i++) {
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
InternalScopedString res(2 * GetPageSizeCached());
- RenderFrame(&res, common_flags()->stack_trace_format, i, ent->info,
+ RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
common_flags()->strip_path_prefix, "__interceptor_");
Printf("%s\n", res.data());
}
@@ -252,10 +251,20 @@
return 0;
}
-ReportStack *SkipTsanInternalFrames(ReportStack *ent) {
- while (FrameIsInternal(ent) && ent->next)
- ent = ent->next;
- return ent;
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ return file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_"));
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
}
void PrintReport(const ReportDesc *rep) {
@@ -325,27 +334,31 @@
if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
- if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) {
- const AddressInfo &info = ent->info;
- ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames)) {
+ const AddressInfo &info = frame->info;
+ ReportErrorSummary(rep_typ_str, info.file, info.line, info.function);
+ }
}
Printf("==================\n");
}
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
const int kMainThreadId = 1;
void PrintStack(const ReportStack *ent) {
- if (ent == 0) {
+ if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n");
return;
}
- for (int i = 0; ent; ent = ent->next, i++) {
- const AddressInfo &info = ent->info;
- Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line,
- (void *)info.module_offset);
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function,
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, (void *)info.module_offset);
}
}
diff --git a/lib/tsan/rtl/tsan_report.h b/lib/tsan/rtl/tsan_report.h
index e6d8535..3e344a0 100644
--- a/lib/tsan/rtl/tsan_report.h
+++ b/lib/tsan/rtl/tsan_report.h
@@ -36,10 +36,9 @@
};
struct ReportStack {
- ReportStack *next;
- AddressInfo info;
+ SymbolizedStack *frames;
bool suppressable;
- static ReportStack *New(uptr addr);
+ static ReportStack *New();
private:
ReportStack();
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 79320cb..b76f3e0 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -43,7 +43,7 @@
namespace __tsan {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
@@ -67,13 +67,22 @@
static ThreadContextBase *CreateThreadContext(u32 tid) {
// Map thread trace when context is created.
MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace));
- new(ThreadTrace(tid)) Trace();
+ const uptr hdr = GetThreadTraceHeader(tid);
+ MapThreadTrace(hdr, sizeof(Trace));
+ new((void*)hdr) Trace();
+ // We are going to use only a small part of the trace with the default
+ // value of history_size. However, the constructor writes to the whole trace.
+ // Unmap the unused part.
+ uptr hdr_end = hdr + sizeof(Trace);
+ hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
+ hdr_end = RoundUp(hdr_end, GetPageSizeCached());
+ if (hdr_end < hdr + sizeof(Trace))
+ UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
return new(mem) ThreadContext(tid);
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -102,7 +111,7 @@
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -111,12 +120,13 @@
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
}
+#ifndef SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
@@ -127,13 +137,11 @@
}
static void BackgroundThread(void *arg) {
-#ifndef TSAN_GO
// This is a non-initialized non-user thread, nothing to see here.
// We don't use ScopedIgnoreInterceptors, because we want ignores to be
// enabled even when the thread function exits (e.g. during pthread thread
// shutdown code).
cur_thread()->ignore_interceptors++;
-#endif
const u64 kMs2Ns = 1000 * 1000;
fd_t mprof_fd = kInvalidFd;
@@ -143,9 +151,8 @@
} else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
mprof_fd = 2;
} else {
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, (int)internal_getpid());
+ InternalScopedString filename(kMaxPathLength);
+ filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
uptr openrv = OpenFile(filename.data(), true);
if (internal_iserror(openrv)) {
Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
@@ -192,7 +199,6 @@
if (mprof_fd != kInvalidFd)
MemoryProfiler(ctx, mprof_fd, i);
-#ifndef TSAN_GO
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
u64 last = atomic_load(&ctx->last_symbolize_time_ns,
@@ -204,7 +210,6 @@
atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
}
}
-#endif
}
}
@@ -212,13 +217,14 @@
ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}
-#ifndef TSAN_GO
+#ifndef __mips__
static void StopBackgroundThread() {
atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
internal_join_thread(ctx->background_thread);
ctx->background_thread = 0;
}
#endif
+#endif
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
@@ -283,11 +289,11 @@
if (p < beg || p >= end)
continue;
const uptr s = MemToShadow(p);
- VPrintf(3, " checking pointer %p -> %p\n", p, s);
+ const uptr m = (uptr)MemToMeta(p);
+ VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
CHECK(IsAppMem(p));
CHECK(IsShadowMem(s));
CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
- const uptr m = (uptr)MemToMeta(p);
CHECK(IsMetaMem(m));
}
}
@@ -309,7 +315,7 @@
ctx = new(ctx_placeholder) Context;
const char *options = GetEnv(kTsanOptionsEnv);
InitializeFlags(&ctx->flags, options);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeAllocator();
#endif
InitializeInterceptors();
@@ -317,20 +323,23 @@
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeShadowMemory();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
-#endif
+ // On MIPS, TSan initialization is run before
+ // __pthread_initialize_minimal_internal() is finished, so we can not spawn
+ // new threads.
+#ifndef __mips__
StartBackgroundThread();
-#ifndef TSAN_GO
SetSandboxingCallback(StopBackgroundThread);
#endif
+#endif
if (common_flags()->detect_deadlocks)
ctx->dd = DDetector::Create(flags());
@@ -365,16 +374,15 @@
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef TSAN_GO
- if (common_flags()->verbosity)
- AllocatorPrintStats();
+#ifndef SANITIZER_GO
+ if (Verbosity()) AllocatorPrintStats();
#endif
ThreadFinalize(thr);
if (ctx->nreported) {
failed = true;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -389,19 +397,22 @@
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
failed = OnFinalize(failed);
+#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
StatOutput(ctx->stat);
+#endif
+
return failed ? flags()->exitcode : 0;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -421,7 +432,7 @@
VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
" parent had %d threads\n", (int)internal_getpid(), (int)nthread);
if (nthread == 1) {
- internal_start_thread(&BackgroundThread, 0);
+ StartBackgroundThread();
} else {
// We've just forked a multi-threaded process. We cannot reasonably function
// after that (some mutexes may be locked before fork). So just enable
@@ -434,7 +445,7 @@
}
#endif
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -453,7 +464,7 @@
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -499,7 +510,7 @@
return TraceSize() / kTracePartSize;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -532,7 +543,7 @@
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -566,43 +577,26 @@
// it's just not worth it (performance- and complexity-wise).
Shadow old(0);
- if (kShadowCnt == 1) {
- int idx = 0;
+
+  // In release mode we manually unroll the loop,
+ // because empirically gcc generates better code this way.
+ // However, we can't afford unrolling in debug mode, because the function
+ // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
+ // threads, which is not enough for the unrolled loop.
+#if SANITIZER_DEBUG
+ for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 2) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 4) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- idx = 2;
-#include "tsan_update_shadow_word_inl.h"
- idx = 3;
-#include "tsan_update_shadow_word_inl.h"
- } else if (kShadowCnt == 8) {
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
-#include "tsan_update_shadow_word_inl.h"
- idx = 2;
-#include "tsan_update_shadow_word_inl.h"
- idx = 3;
-#include "tsan_update_shadow_word_inl.h"
- idx = 4;
-#include "tsan_update_shadow_word_inl.h"
- idx = 5;
-#include "tsan_update_shadow_word_inl.h"
- idx = 6;
-#include "tsan_update_shadow_word_inl.h"
- idx = 7;
-#include "tsan_update_shadow_word_inl.h"
- } else {
- CHECK(false);
}
+#else
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 2;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 3;
+#include "tsan_update_shadow_word_inl.h"
+#endif
// we did not find any races and had already stored
// the current access info, so we are done
@@ -653,7 +647,7 @@
return false;
}
-#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
_mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
(i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
@@ -713,11 +707,12 @@
ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
-#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+#if defined(__SSE3__)
bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
// NOTE: this check can fail if the shadow is concurrently mutated
- // by other threads.
- DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ // by other threads. But it still can be useful if you modify
+ // ContainsSameAccessFast and want to ensure that it's not completely broken.
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
return res;
#else
return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
@@ -734,7 +729,7 @@
(int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
(uptr)shadow_mem[0], (uptr)shadow_mem[1],
(uptr)shadow_mem[2], (uptr)shadow_mem[3]);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
Printf("Access to non app mem %zx\n", addr);
DCHECK(IsAppMem(addr));
@@ -844,7 +839,7 @@
}
} else {
// The region is big, reset only beginning and end.
- const uptr kPageSize = 4096;
+ const uptr kPageSize = GetPageSizeCached();
u64 *begin = (u64*)MemToShadow(addr);
u64 *end = begin + size / kShadowCell * kShadowCnt;
u64 *p = begin;
@@ -918,7 +913,7 @@
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -938,7 +933,7 @@
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -949,7 +944,7 @@
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -961,7 +956,7 @@
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
@@ -971,7 +966,7 @@
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -981,7 +976,7 @@
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -991,7 +986,7 @@
return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
@@ -1003,19 +998,9 @@
void build_consistency_nostats() {}
#endif
-#if TSAN_SHADOW_COUNT == 1
-void build_consistency_shadow1() {}
-#elif TSAN_SHADOW_COUNT == 2
-void build_consistency_shadow2() {}
-#elif TSAN_SHADOW_COUNT == 4
-void build_consistency_shadow4() {}
-#else
-void build_consistency_shadow8() {}
-#endif
-
} // namespace __tsan
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 591d10d..7a60e5c 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -52,10 +52,23 @@
namespace __tsan {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct MapUnmapCallback;
+#ifdef __mips64
+static const uptr kAllocatorSpace = 0;
+static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
+static const uptr kAllocatorRegionSizeLog = 20;
+static const uptr kAllocatorNumRegions =
+ kAllocatorSize >> kAllocatorRegionSizeLog;
+typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
+ MapUnmapCallback> ByteMap;
+typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
+ CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
+ MapUnmapCallback> PrimaryAllocator;
+#else
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@@ -332,7 +345,7 @@
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -345,13 +358,15 @@
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
+#if TSAN_COLLECT_STATS
u64 stat[StatCnt];
+#endif
const int tid;
const int unique_id;
bool in_symbolizer;
@@ -365,7 +380,9 @@
const uptr tls_size;
ThreadContext *tctx;
+#if SANITIZER_DEBUG && !SANITIZER_GO
InternalDeadlockDetector internal_deadlock_detector;
+#endif
DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
@@ -376,7 +393,7 @@
DenseSlabAllocCache sync_cache;
DenseSlabAllocCache clock_cache;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -391,7 +408,7 @@
uptr tls_addr, uptr tls_size);
};
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
@@ -481,13 +498,13 @@
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -539,15 +556,20 @@
}
+#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
+#endif
+
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
- if (kCollectStats)
- thr->stat[typ] += n;
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] += n;
+#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
- if (kCollectStats)
- thr->stat[typ] = n;
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] = n;
+#endif
}
void MapShadow(uptr addr, uptr size);
@@ -568,8 +590,6 @@
StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
-bool FrameIsInternal(const ReportStack *frame);
-ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -687,7 +707,7 @@
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use normal call.
-#if TSAN_DEBUG == 0
+#if !SANITIZER_DEBUG && defined(__x86_64__)
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
@@ -719,7 +739,7 @@
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -731,6 +751,16 @@
*evp = ev;
}
+#ifndef SANITIZER_GO
+uptr ALWAYS_INLINE HeapEnd() {
+#if SANITIZER_CAN_USE_ALLOCATOR64
+ return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
+#else
+ return kHeapMemEnd;
+#endif
+}
+#endif
+
} // namespace __tsan
#endif // TSAN_RTL_H
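
Two of the hunks above are easier to read with the numbers written out: the mips64 branch swaps in SizeClassAllocator32 because the 64-bit primary allocator expects a fixed heap region that does not fit in a 40-bit address space, and the TwoLevelByteMap parameters follow directly from the region size. A sketch of that arithmetic, assuming SANITIZER_MMAP_RANGE_SIZE is 1ULL << 40 on mips64 (check sanitizer_platform.h before relying on the exact figures):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kAllocatorSize = 1ULL << 40;  // assumed mips64 mmap range
      const uint64_t kRegionSizeLog = 20;          // 1 MiB regions
      const uint64_t kNumRegions    = kAllocatorSize >> kRegionSizeLog;  // 2^20
      const uint64_t kByteMapLevel2 = 1 << 12;     // bytes per lazily mapped leaf
      const uint64_t kByteMapLevel1 = kNumRegions >> 12;                 // 256 entries
      // One byte of size-class metadata per 1 MiB region; only the 256-entry
      // first level is allocated up front, leaves are mapped on demand.
      std::printf("regions=%llu  level1=%llu  level2=%llu\n",
                  (unsigned long long)kNumRegions,
                  (unsigned long long)kByteMapLevel1,
                  (unsigned long long)kByteMapLevel2);
      return 0;
    }
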
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 0807869..ddf2b69 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -88,7 +88,7 @@
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Global mutexes not marked as LINKER_INITIALIZED
// cause tons of uninteresting reports, so just ignore it.
if (IsGlobalVar(addr))
@@ -405,7 +405,7 @@
s->mtx.Unlock();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index c7a00c9..dc9438e 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -56,40 +56,43 @@
}
#endif
-static void StackStripMain(ReportStack *stack) {
- ReportStack *last_frame = 0;
- ReportStack *last_frame2 = 0;
- for (ReportStack *ent = stack; ent; ent = ent->next) {
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
last_frame2 = last_frame;
- last_frame = ent;
+ last_frame = cur;
}
if (last_frame2 == 0)
return;
+#ifndef SANITIZER_GO
const char *last = last_frame->info.function;
-#ifndef TSAN_GO
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
if (last2 && 0 == internal_strcmp(last2, "main")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip our internal thread start routine.
} else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// Strip global ctors init.
} else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
- last_frame2->next = 0;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
// If both are 0, then we probably just failed to symbolize.
} else if (last || last2) {
// Ensure that we recovered stack completely. Trimmed stack
// can actually happen if we do not instrument some code,
// so it's only a debug print. However we must try hard to not miss it
// due to our fault.
- DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc);
+ DPrintf("Bottom stack frame is missed\n");
}
#else
// The last frame always points into the runtime (gosched0, goexit0, runtime.main).
- last_frame2->next = 0;
- (void)last;
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
#endif
}
@@ -105,31 +108,29 @@
static ReportStack *SymbolizeStack(StackTrace trace) {
if (trace.size == 0)
return 0;
- ReportStack *stack = 0;
+ SymbolizedStack *top = nullptr;
for (uptr si = 0; si < trace.size; si++) {
const uptr pc = trace.trace[si];
-#ifndef TSAN_GO
- // We obtain the return address, that is, address of the next instruction,
- // so offset it by 1 byte.
- const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
-#else
- // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
uptr pc1 = pc;
- if (si != trace.size - 1)
- pc1 -= 1;
-#endif
- ReportStack *ent = SymbolizeCode(pc1);
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
CHECK_NE(ent, 0);
- ReportStack *last = ent;
+ SymbolizedStack *last = ent;
while (last->next) {
last->info.address = pc; // restore original pc for report
last = last->next;
}
last->info.address = pc; // restore original pc for report
- last->next = stack;
- stack = ent;
+ last->next = top;
+ top = ent;
}
- StackStripMain(stack);
+ StackStripMain(top);
+
+ ReportStack *stack = ReportStack::New();
+ stack->frames = top;
return stack;
}
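
The rewritten loop above symbolizes the call site rather than the raw return address, except for PCs tagged with kExternalPCBit, which are synthetic and must be passed through untouched. A minimal sketch of that adjustment; on x86 GetPreviousInstructionPc() effectively subtracts one so the PC lands inside the call instruction, while other targets subtract a fixed instruction width.

    #include <cstdint>

    constexpr uint64_t kExternalPCBit = 1ULL << 60;  // JIT/Java PCs, see tsan_symbolize.h

    inline uint64_t CallSitePC(uint64_t return_pc) {
      if (return_pc & kExternalPCBit)
        return return_pc;   // synthetic PC: handled by __tsan_symbolize_external as-is
      return return_pc - 1; // x86-style adjustment into the preceding call instruction
    }
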
@@ -198,7 +199,7 @@
rt->stack->suppressable = suppressable;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
@@ -243,8 +244,9 @@
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef TSAN_GO
- AddThread(FindThreadByUidLocked(unique_tid), suppressable);
+#ifndef SANITIZER_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
#endif
}
@@ -298,7 +300,7 @@
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -348,7 +350,7 @@
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -390,7 +392,7 @@
InternalScopedBuffer<uptr> stack(kShadowStackSize);
for (uptr i = 0; i < hdr->stack0.size; i++) {
stack[i] = hdr->stack0.trace[i];
- DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
+ DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
}
if (mset)
*mset = hdr->mset0;
@@ -547,16 +549,6 @@
return false;
}
-bool FrameIsInternal(const ReportStack *frame) {
- if (frame == 0)
- return false;
- const char *file = frame->info.file;
- return file != 0 &&
- (internal_strstr(file, "tsan_interceptors.cc") ||
- internal_strstr(file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(file, "tsan_interface_"));
-}
-
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
Shadow s0(thr->racy_state[0]);
Shadow s1(thr->racy_state[1]);
@@ -648,7 +640,7 @@
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -669,7 +661,7 @@
}
void PrintCurrentStackSlow(uptr pc) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 256a95c..8ed1fbf 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,7 +30,7 @@
, epoch1() {
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -92,7 +92,7 @@
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -104,19 +104,20 @@
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadStart(thr);
#endif
if (common_flags()->detect_deadlocks) {
thr->dd_pt = ctx->dd->CreatePhysicalThread();
thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
}
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ // Commit switch to the new part of the trace.
+ // TraceAddEvent will reset stack0/mset0 in the new part for us.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
- thr->fast_state.SetHistorySize(flags()->history_size);
- const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
- Trace *thr_trace = ThreadTrace(thr->tid);
- thr_trace->headers[trace].epoch0 = epoch0;
StatInc(thr, StatSyncAcquire);
sync.Reset(&thr->clock_cache);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
@@ -140,15 +141,17 @@
}
ctx->clock_alloc.FlushCache(&thr->clock_cache);
ctx->metamap.OnThreadIdle(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
+#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
+#endif
thr = 0;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -170,7 +173,7 @@
}
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -202,7 +205,7 @@
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -238,6 +241,7 @@
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
+#ifndef SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -258,6 +262,7 @@
thr_end, tls_addr + tls_size - thr_end);
}
}
+#endif
ThreadRegistry *tr = ctx->thread_registry;
OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
@@ -267,7 +272,7 @@
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
@@ -329,7 +334,7 @@
thr->tid, (void*)pc, (void*)addr,
(int)size, is_write);
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
if (!IsAppMem(addr)) {
Printf("Access to non app mem %zx\n", addr);
DCHECK(IsAppMem(addr));
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index 350a2ba..15fa43d 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -15,17 +15,14 @@
namespace __tsan {
+#if TSAN_COLLECT_STATS
+
void StatAggregate(u64 *dst, u64 *src) {
- if (!kCollectStats)
- return;
for (int i = 0; i < StatCnt; i++)
dst[i] += src[i];
}
void StatOutput(u64 *stat) {
- if (!kCollectStats)
- return;
-
stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
static const char *name[StatCnt] = {};
@@ -176,4 +173,6 @@
Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
}
+#endif
+
} // namespace __tsan
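
The stats code above now disappears from the build when TSAN_COLLECT_STATS is 0, instead of leaning on the optimizer to fold `if (kCollectStats)`; the per-thread counter array in ThreadState is compiled out the same way. A self-contained sketch of the pattern, with DEMO_COLLECT_STATS standing in for TSAN_COLLECT_STATS:

    #include <cstdio>

    #define DEMO_COLLECT_STATS 1

    struct ThreadStats {
    #if DEMO_COLLECT_STATS
      unsigned long long stat[4] = {};   // absent entirely in non-stats builds
    #endif
    };

    inline void StatInc(ThreadStats *t, int typ, unsigned long long n = 1) {
    #if DEMO_COLLECT_STATS
      t->stat[typ] += n;
    #else
      (void)t; (void)typ; (void)n;       // compiled out completely
    #endif
    }

    int main() {
      ThreadStats t;
      StatInc(&t, 0);
    #if DEMO_COLLECT_STATS
      std::printf("stat[0]=%llu\n", t.stat[0]);
    #endif
    }
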
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index 1c5bea0..5413f04 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -33,7 +33,7 @@
"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
// Can be overridden in the frontend.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" const char *WEAK __tsan_default_suppressions() {
return 0;
}
@@ -41,62 +41,74 @@
namespace __tsan {
-static bool suppressions_inited = false;
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char *kSuppressionTypes[] = {
+ kSuppressionRace, kSuppressionMutex, kSuppressionThread,
+ kSuppressionSignal, kSuppressionLib, kSuppressionDeadlock};
void InitializeSuppressions() {
- CHECK(!suppressions_inited);
- SuppressionContext::InitIfNecessary();
-#ifndef TSAN_GO
- SuppressionContext::Get()->Parse(__tsan_default_suppressions());
- SuppressionContext::Get()->Parse(std_suppressions);
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+#ifndef SANITIZER_GO
+ suppression_ctx->Parse(__tsan_default_suppressions());
+ suppression_ctx->Parse(std_suppressions);
#endif
- suppressions_inited = true;
}
-SuppressionType conv(ReportType typ) {
+SuppressionContext *Suppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static const char *conv(ReportType typ) {
if (typ == ReportTypeRace)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeVptrRace)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeUseAfterFree)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeVptrUseAfterFree)
- return SuppressionRace;
+ return kSuppressionRace;
else if (typ == ReportTypeThreadLeak)
- return SuppressionThread;
+ return kSuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
- return SuppressionMutex;
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexDoubleLock)
- return SuppressionMutex;
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexBadUnlock)
- return SuppressionMutex;
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexBadReadLock)
- return SuppressionMutex;
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexBadReadUnlock)
- return SuppressionMutex;
+ return kSuppressionMutex;
else if (typ == ReportTypeSignalUnsafe)
- return SuppressionSignal;
+ return kSuppressionSignal;
else if (typ == ReportTypeErrnoInSignal)
- return SuppressionNone;
+ return kSuppressionNone;
else if (typ == ReportTypeDeadlock)
- return SuppressionDeadlock;
+ return kSuppressionDeadlock;
Printf("ThreadSanitizer: unknown report type %d\n", typ),
Die();
}
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
- if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 ||
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || stack == 0 ||
!stack->suppressable)
return 0;
- SuppressionType stype = conv(typ);
- if (stype == SuppressionNone)
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
return 0;
Suppression *s;
- for (const ReportStack *frame = stack; frame; frame = frame->next) {
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
const AddressInfo &info = frame->info;
- if (SuppressionContext::Get()->Match(info.function, stype, &s) ||
- SuppressionContext::Get()->Match(info.file, stype, &s) ||
- SuppressionContext::Get()->Match(info.module, stype, &s)) {
+ if (suppression_ctx->Match(info.function, stype, &s) ||
+ suppression_ctx->Match(info.file, stype, &s) ||
+ suppression_ctx->Match(info.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
@@ -107,16 +119,17 @@
}
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
- if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 ||
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || loc == 0 ||
loc->type != ReportLocationGlobal || !loc->suppressable)
return 0;
- SuppressionType stype = conv(typ);
- if (stype == SuppressionNone)
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
return 0;
Suppression *s;
const DataInfo &global = loc->global;
- if (SuppressionContext::Get()->Match(global.name, stype, &s) ||
- SuppressionContext::Get()->Match(global.module, stype, &s)) {
+ if (suppression_ctx->Match(global.name, stype, &s) ||
+ suppression_ctx->Match(global.module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
@@ -127,7 +140,8 @@
void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
- SuppressionContext::Get()->GetMatched(&matched);
+ CHECK(suppression_ctx);
+ suppression_ctx->GetMatched(&matched);
if (!matched.size())
return;
int hit_count = 0;
@@ -136,8 +150,8 @@
Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
(int)internal_getpid());
for (uptr i = 0; i < matched.size(); i++) {
- Printf("%d %s:%s\n", matched[i]->hit_count,
- SuppressionTypeString(matched[i]->type), matched[i]->templ);
+ Printf("%d %s:%s\n", matched[i]->hit_count, matched[i]->type,
+ matched[i]->templ);
}
}
} // namespace __tsan
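
The suppression context above is built with placement new inside an aligned static buffer, so the runtime gets a constructed object without registering a global constructor. A minimal stand-alone sketch of that pattern with a stand-in Context type (the real SuppressionContext also parses files and matches templates):

    #include <new>
    #include <cassert>
    #include <cstddef>

    struct Context {
      const char *const *types;
      size_t ntypes;
      Context(const char *const *t, size_t n) : types(t), ntypes(n) {}
    };

    alignas(64) static char ctx_placeholder[sizeof(Context)];
    static Context *ctx = nullptr;

    static const char *kTypes[] = {"race", "thread", "mutex"};

    void InitializeContext() {
      assert(ctx == nullptr);
      // Construct in static storage; nothing runs before main() and nothing
      // needs to be destroyed at exit.
      ctx = new (ctx_placeholder) Context(kTypes, sizeof(kTypes) / sizeof(kTypes[0]));
    }
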
diff --git a/lib/tsan/rtl/tsan_suppressions.h b/lib/tsan/rtl/tsan_suppressions.h
index c618b3d..e6d279c 100644
--- a/lib/tsan/rtl/tsan_suppressions.h
+++ b/lib/tsan/rtl/tsan_suppressions.h
@@ -18,7 +18,16 @@
namespace __tsan {
+const char kSuppressionNone[] = "none";
+const char kSuppressionRace[] = "race";
+const char kSuppressionMutex[] = "mutex";
+const char kSuppressionThread[] = "thread";
+const char kSuppressionSignal[] = "signal";
+const char kSuppressionLib[] = "called_from_lib";
+const char kSuppressionDeadlock[] = "deadlock";
+
void InitializeSuppressions();
+SuppressionContext *Suppressions();
void PrintMatchedSuppressions();
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
diff --git a/lib/tsan/rtl/tsan_symbolize.cc b/lib/tsan/rtl/tsan_symbolize.cc
index c08de6a..3beb44f 100644
--- a/lib/tsan/rtl/tsan_symbolize.cc
+++ b/lib/tsan/rtl/tsan_symbolize.cc
@@ -36,10 +36,6 @@
thr->ignore_interceptors--;
}
-// Denotes fake PC values that come from JIT/JAVA/etc.
-// For such PC values __tsan_symbolize_external() will be called.
-const uptr kExternalPCBit = 1ULL << 60;
-
// May be overridden by JIT/JAVA/etc,
// whatever produces PCs marked with kExternalPCBit.
extern "C" bool __tsan_symbolize_external(uptr pc,
@@ -55,7 +51,7 @@
return false;
}
-ReportStack *SymbolizeCode(uptr addr) {
+SymbolizedStack *SymbolizeCode(uptr addr) {
// Check if PC comes from non-native land.
if (addr & kExternalPCBit) {
// Declare static to not consume too much stack space.
@@ -63,36 +59,17 @@
static char func_buf[1024];
static char file_buf[1024];
int line, col;
- ReportStack *ent = ReportStack::New(addr);
- if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf),
- file_buf, sizeof(file_buf), &line, &col))
- return ent;
- ent->info.function = internal_strdup(func_buf);
- ent->info.file = internal_strdup(file_buf);
- ent->info.line = line;
- ent->info.column = col;
- return ent;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
}
- static const uptr kMaxAddrFrames = 16;
- InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
- for (uptr i = 0; i < kMaxAddrFrames; i++)
- new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
- addr, addr_frames.data(), kMaxAddrFrames);
- if (addr_frames_num == 0)
- return ReportStack::New(addr);
- ReportStack *top = 0;
- ReportStack *bottom = 0;
- for (uptr i = 0; i < addr_frames_num; i++) {
- ReportStack *cur_entry = ReportStack::New(addr);
- cur_entry->info = addr_frames[i];
- if (i == 0)
- top = cur_entry;
- else
- bottom->next = cur_entry;
- bottom = cur_entry;
- }
- return top;
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
}
ReportLocation *SymbolizeData(uptr addr) {
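
__tsan_symbolize_external above follows the weak-hook pattern: the runtime ships a weak default that reports failure, and an embedder such as a JIT or Java VM links a strong definition to take over symbolization of kExternalPCBit PCs. A sketch with a hypothetical hook name (example_symbolize_hook is not a real TSan entry point):

    #include <cstdio>

    // Weak default: replaced at link time by any strong definition.
    extern "C" __attribute__((weak)) bool example_symbolize_hook(
        unsigned long pc, char *buf, unsigned long buf_size) {
      (void)pc; (void)buf; (void)buf_size;
      return false;  // nothing known about this pc
    }

    void PrintFrame(unsigned long pc) {
      char buf[128];
      if (example_symbolize_hook(pc, buf, sizeof(buf)))
        std::printf("%#lx %s\n", pc, buf);
      else
        std::printf("%#lx <external pc, no symbolizer hook linked>\n", pc);
    }
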
diff --git a/lib/tsan/rtl/tsan_symbolize.h b/lib/tsan/rtl/tsan_symbolize.h
index 828edc9..b59b6cf 100644
--- a/lib/tsan/rtl/tsan_symbolize.h
+++ b/lib/tsan/rtl/tsan_symbolize.h
@@ -18,9 +18,13 @@
namespace __tsan {
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external() will be called.
+const uptr kExternalPCBit = 1ULL << 60;
+
void EnterSymbolizer();
void ExitSymbolizer();
-ReportStack *SymbolizeCode(uptr addr);
+SymbolizedStack *SymbolizeCode(uptr addr);
ReportLocation *SymbolizeData(uptr addr);
void SymbolizeFlush();
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 7fb5ae3..2569c7e 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -20,9 +20,9 @@
namespace __tsan {
-const int kTracePartSizeBits = 14;
+const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
+const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
// Must fit into 3 bits.
@@ -42,7 +42,7 @@
typedef u64 Event;
struct TraceHeader {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -54,13 +54,15 @@
};
struct Trace {
- TraceHeader headers[kTraceParts];
Mutex mtx;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
#endif
+ // Must be the last field, because we unmap the unused part in
+ // CreateThreadContext.
+ TraceHeader headers[kTraceParts];
Trace()
: mtx(MutexTypeTrace, StatMtxTrace) {
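
The constant changes above halve the per-thread trace: parts shrink from 2^14 to 2^13 events and the total event budget drops from 4M to 2M. Moving TraceHeader to the end also lets CreateThreadContext unmap the unused tail of the headers array, per the new comment. The resulting sizes, with Event being a u64 as declared above:

    #include <cstdio>

    int main() {
      const long kTracePartSize = 1 << 13;                           // 8192 events/part
      const long kTraceParts    = 2 * 1024 * 1024 / kTracePartSize;  // 256 parts
      const long kTraceSize     = kTracePartSize * kTraceParts;      // 2M events
      std::printf("%ld events, %ld MiB of u64 events per thread (was 32 MiB)\n",
                  kTraceSize, kTraceSize * 8 / (1024 * 1024));
      return 0;
    }
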
diff --git a/lib/tsan/tests/CMakeLists.txt b/lib/tsan/tests/CMakeLists.txt
index 2e830c3..e0c3f8a 100644
--- a/lib/tsan/tests/CMakeLists.txt
+++ b/lib/tsan/tests/CMakeLists.txt
@@ -6,6 +6,7 @@
set(TSAN_UNITTEST_CFLAGS
${TSAN_CFLAGS}
+ ${COMPILER_RT_TEST_CFLAGS}
${COMPILER_RT_GTEST_CFLAGS}
-I${COMPILER_RT_SOURCE_DIR}/lib
-I${COMPILER_RT_SOURCE_DIR}/lib/tsan/rtl
@@ -33,26 +34,28 @@
macro(add_tsan_unittest testname)
# Build unit tests only for 64-bit Linux.
- if(UNIX AND NOT APPLE AND CAN_TARGET_x86_64)
- parse_arguments(TEST "SOURCES;HEADERS" "" ${ARGN})
- set(TEST_OBJECTS)
- foreach(SOURCE ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE})
- tsan_compile(TEST_OBJECTS ${SOURCE} x86_64 ${TEST_HEADERS})
+ if(UNIX AND NOT APPLE)
+ foreach(arch ${TSAN_SUPPORTED_ARCH})
+ parse_arguments(TEST "SOURCES;HEADERS" "" ${ARGN})
+ set(TEST_OBJECTS)
+ foreach(SOURCE ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE})
+ tsan_compile(TEST_OBJECTS ${SOURCE} ${arch} ${TEST_HEADERS})
+ endforeach()
+ get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
+ set(TEST_DEPS ${TEST_OBJECTS})
+ if(NOT COMPILER_RT_STANDALONE_BUILD)
+ list(APPEND TEST_DEPS tsan)
+ endif()
+ # FIXME: Looks like we should link TSan with just-built runtime,
+ # and not rely on -fsanitize=thread, as these tests are essentially
+ # unit tests.
+ add_compiler_rt_test(TsanUnitTests ${testname}
+ OBJECTS ${TEST_OBJECTS}
+ DEPS ${TEST_DEPS}
+ LINK_FLAGS ${TARGET_LINK_FLAGS}
+ -fsanitize=thread
+ -lstdc++ -lm)
endforeach()
- get_target_flags_for_arch(${arch} TARGET_LINK_FLAGS)
- set(TEST_DEPS ${TEST_OBJECTS})
- if(NOT COMPILER_RT_STANDALONE_BUILD)
- list(APPEND TEST_DEPS tsan)
- endif()
- # FIXME: Looks like we should link TSan with just-built runtime,
- # and not rely on -fsanitize=thread, as these tests are essentially
- # unit tests.
- add_compiler_rt_test(TsanUnitTests ${testname}
- OBJECTS ${TEST_OBJECTS}
- DEPS ${TEST_DEPS}
- LINK_FLAGS ${TARGET_LINK_FLAGS}
- -fsanitize=thread
- -lstdc++ -lm)
endif()
endmacro()
diff --git a/lib/tsan/tests/rtl/tsan_string.cc b/lib/tsan/tests/rtl/tsan_string.cc
index c402f7c..75adc6c 100644
--- a/lib/tsan/tests/rtl/tsan_string.cc
+++ b/lib/tsan/tests/rtl/tsan_string.cc
@@ -46,9 +46,6 @@
t2.Memcpy(data, data2, 10, true);
}
-// The test fails with TSAN_SHADOW_COUNT=2,
-// because the old racy access is evicted.
-#if defined(TSAN_SHADOW_COUNT) && TSAN_SHADOW_COUNT >= 4
TEST(ThreadSanitizer, MemcpyRace2) {
char *data = new char[10];
char *data1 = new char[10];
@@ -57,7 +54,6 @@
t1.Memcpy(data+5, data1, 1);
t2.Memcpy(data+3, data2, 4, true);
}
-#endif
TEST(ThreadSanitizer, MemcpyRace3) {
char *data = new char[10];
diff --git a/lib/tsan/tests/unit/tsan_clock_test.cc b/lib/tsan/tests/unit/tsan_clock_test.cc
index a1fd2b7..9207182 100644
--- a/lib/tsan/tests/unit/tsan_clock_test.cc
+++ b/lib/tsan/tests/unit/tsan_clock_test.cc
@@ -211,8 +211,8 @@
}
}
-const int kThreads = 4;
-const int kClocks = 4;
+const uptr kThreads = 4;
+const uptr kClocks = 4;
// SimpleSyncClock and SimpleThreadClock implement the same thing as
// SyncClock and ThreadClock, but in a very simple way.
diff --git a/lib/tsan/tests/unit/tsan_mman_test.cc b/lib/tsan/tests/unit/tsan_mman_test.cc
index d969989..bfaefe6 100644
--- a/lib/tsan/tests/unit/tsan_mman_test.cc
+++ b/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -136,7 +136,7 @@
}
TEST(Mman, CallocOverflow) {
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
// EXPECT_DEATH clones a thread with 4K stack,
// which is overflown by tsan memory accesses functions in debug mode.
return;
diff --git a/lib/tsan/tests/unit/tsan_mutex_test.cc b/lib/tsan/tests/unit/tsan_mutex_test.cc
index c39841d..cce7f07 100644
--- a/lib/tsan/tests/unit/tsan_mutex_test.cc
+++ b/lib/tsan/tests/unit/tsan_mutex_test.cc
@@ -64,7 +64,7 @@
const int kThreads = 8;
const int kWriteRate = 1024;
-#if TSAN_DEBUG
+#if SANITIZER_DEBUG
const int kIters = 16*1024;
#else
const int kIters = 64*1024;
diff --git a/lib/ubsan/CMakeLists.txt b/lib/ubsan/CMakeLists.txt
index 09c7a85..6683272 100644
--- a/lib/ubsan/CMakeLists.txt
+++ b/lib/ubsan/CMakeLists.txt
@@ -44,7 +44,7 @@
clang_rt.san-${arch}
clang_rt.ubsan-${arch}
clang_rt.ubsan_cxx-${arch})
- if (UNIX AND NOT ${arch} STREQUAL "i386" AND NOT ${arch} STREQUAL "i686")
+ if (UNIX AND NOT ${arch} MATCHES "i386|i686")
add_sanitizer_rt_symbols(clang_rt.ubsan-${arch} ubsan.syms.extra)
add_sanitizer_rt_symbols(clang_rt.ubsan_cxx-${arch} ubsan.syms.extra)
add_dependencies(ubsan
diff --git a/lib/ubsan/ubsan_diag.cc b/lib/ubsan/ubsan_diag.cc
index f562508..4f2a2a9 100644
--- a/lib/ubsan/ubsan_diag.cc
+++ b/lib/ubsan/ubsan_diag.cc
@@ -14,9 +14,11 @@
#include "ubsan_diag.h"
#include "ubsan_init.h"
#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include <stdio.h>
@@ -66,31 +68,9 @@
};
}
-Location __ubsan::getCallerLocation(uptr CallerLoc) {
- if (!CallerLoc)
- return Location();
-
- uptr Loc = StackTrace::GetPreviousInstructionPc(CallerLoc);
- return getFunctionLocation(Loc, 0);
-}
-
-Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) {
- if (!Loc)
- return Location();
+SymbolizedStack *__ubsan::getSymbolizedLocation(uptr PC) {
InitIfNecessary();
-
- AddressInfo Info;
- if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) || !Info.module ||
- !*Info.module)
- return Location(Loc);
-
- if (FName && Info.function)
- *FName = Info.function;
-
- if (!Info.file)
- return ModuleLocation(Info.module, Info.module_offset);
-
- return SourceLocation(Info.file, Info.line, Info.column);
+ return Symbolizer::GetOrInit()->SymbolizePC(PC);
}
Diag &Diag::operator<<(const TypeDescriptor &V) {
@@ -134,15 +114,22 @@
SLoc.getColumn(), common_flags()->strip_path_prefix);
break;
}
- case Location::LK_Module: {
- ModuleLocation MLoc = Loc.getModuleLocation();
- RenderModuleLocation(&LocBuffer, MLoc.getModuleName(), MLoc.getOffset(),
- common_flags()->strip_path_prefix);
- break;
- }
case Location::LK_Memory:
LocBuffer.append("%p", Loc.getMemoryLocation());
break;
+ case Location::LK_Symbolized: {
+ const AddressInfo &Info = Loc.getSymbolizedStack()->info;
+ if (Info.file) {
+ RenderSourceLocation(&LocBuffer, Info.file, Info.line, Info.column,
+ common_flags()->strip_path_prefix);
+ } else if (Info.module) {
+ RenderModuleLocation(&LocBuffer, Info.module, Info.module_offset,
+ common_flags()->strip_path_prefix);
+ } else {
+ LocBuffer.append("%p", Info.address);
+ }
+ break;
+ }
case Location::LK_Null:
LocBuffer.append("<unknown>");
break;
@@ -348,11 +335,24 @@
Die();
}
-bool __ubsan::MatchSuppression(const char *Str, SuppressionType Type) {
- Suppression *s;
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kVptrCheck[] = "vptr_check";
+static const char *kSuppressionTypes[] = { kVptrCheck };
+
+void __ubsan::InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+}
+
+bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) {
// If .preinit_array is not used, it is possible that the UBSan runtime is not
// initialized.
if (!SANITIZER_CAN_USE_PREINIT_ARRAY)
InitIfNecessary();
- return SuppressionContext::Get()->Match(Str, Type, &s);
+ CHECK(suppression_ctx);
+ Suppression *s;
+ return suppression_ctx->Match(TypeName, kVptrCheck, &s);
}
diff --git a/lib/ubsan/ubsan_diag.h b/lib/ubsan/ubsan_diag.h
index 296ec0d..44dca90 100644
--- a/lib/ubsan/ubsan_diag.h
+++ b/lib/ubsan/ubsan_diag.h
@@ -15,79 +15,84 @@
#include "ubsan_value.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __ubsan {
-/// \brief A location within a loaded module in the program. These are used when
-/// the location can't be resolved to a SourceLocation.
-class ModuleLocation {
- const char *ModuleName;
- uptr Offset;
+class SymbolizedStackHolder {
+ SymbolizedStack *Stack;
+
+ void clear() {
+ if (Stack)
+ Stack->ClearAll();
+ }
public:
- ModuleLocation() : ModuleName(0), Offset(0) {}
- ModuleLocation(const char *ModuleName, uptr Offset)
- : ModuleName(ModuleName), Offset(Offset) {}
- const char *getModuleName() const { return ModuleName; }
- uptr getOffset() const { return Offset; }
+ explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
+ : Stack(Stack) {}
+ ~SymbolizedStackHolder() { clear(); }
+ void reset(SymbolizedStack *S) {
+ if (Stack != S)
+ clear();
+ Stack = S;
+ }
+ const SymbolizedStack *get() const { return Stack; }
};
+SymbolizedStack *getSymbolizedLocation(uptr PC);
+
+inline SymbolizedStack *getCallerLocation(uptr CallerPC) {
+ CHECK(CallerPC);
+ uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC);
+ return getSymbolizedLocation(PC);
+}
+
/// A location of some data within the program's address space.
typedef uptr MemoryLocation;
/// \brief Location at which a diagnostic can be emitted. Either a
-/// SourceLocation, a ModuleLocation, or a MemoryLocation.
+/// SourceLocation, a MemoryLocation, or a SymbolizedStack.
class Location {
public:
- enum LocationKind { LK_Null, LK_Source, LK_Module, LK_Memory };
+ enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized };
private:
LocationKind Kind;
// FIXME: In C++11, wrap these in an anonymous union.
SourceLocation SourceLoc;
- ModuleLocation ModuleLoc;
MemoryLocation MemoryLoc;
+ const SymbolizedStack *SymbolizedLoc; // Not owned.
public:
Location() : Kind(LK_Null) {}
Location(SourceLocation Loc) :
Kind(LK_Source), SourceLoc(Loc) {}
- Location(ModuleLocation Loc) :
- Kind(LK_Module), ModuleLoc(Loc) {}
Location(MemoryLocation Loc) :
Kind(LK_Memory), MemoryLoc(Loc) {}
+ // SymbolizedStackHolder must outlive Location object.
+ Location(const SymbolizedStackHolder &Stack) :
+ Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {}
LocationKind getKind() const { return Kind; }
bool isSourceLocation() const { return Kind == LK_Source; }
- bool isModuleLocation() const { return Kind == LK_Module; }
bool isMemoryLocation() const { return Kind == LK_Memory; }
+ bool isSymbolizedStack() const { return Kind == LK_Symbolized; }
SourceLocation getSourceLocation() const {
CHECK(isSourceLocation());
return SourceLoc;
}
- ModuleLocation getModuleLocation() const {
- CHECK(isModuleLocation());
- return ModuleLoc;
- }
MemoryLocation getMemoryLocation() const {
CHECK(isMemoryLocation());
return MemoryLoc;
}
+ const SymbolizedStack *getSymbolizedStack() const {
+ CHECK(isSymbolizedStack());
+ return SymbolizedLoc;
+ }
};
-/// Try to obtain a location for the caller. This might fail, and produce either
-/// an invalid location or a module location for the caller.
-Location getCallerLocation(uptr CallerLoc = GET_CALLER_PC());
-
-/// Try to obtain a location for the given function pointer. This might fail,
-/// and produce either an invalid location or a module location for the caller.
-/// If FName is non-null and the name of the function is known, set *FName to
-/// the function name, otherwise *FName is unchanged.
-Location getFunctionLocation(uptr Loc, const char **FName);
-
/// A diagnostic severity level.
enum DiagLevel {
DL_Error, ///< An error.
@@ -230,7 +235,8 @@
~ScopedReport();
};
-bool MatchSuppression(const char *Str, SuppressionType Type);
+void InitializeSuppressions();
+bool IsVptrCheckSuppressed(const char *TypeName);
} // namespace __ubsan
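
SymbolizedStackHolder above owns the frame list and releases it via ClearAll(), while Location only stores a borrowed pointer, so every Location built from a holder is valid only while that holder is in scope (the constructor comment says as much). A stand-alone model of that split, using stand-in types rather than the real ubsan classes:

    #include <cstdio>

    struct Frame { const char *function; Frame *next; };

    class FrameHolder {               // owns the frames, like SymbolizedStackHolder
      Frame *stack_;
     public:
      explicit FrameHolder(Frame *s = nullptr) : stack_(s) {}
      ~FrameHolder() {
        while (stack_) { Frame *n = stack_->next; delete stack_; stack_ = n; }
      }
      const Frame *get() const { return stack_; }
    };

    struct Loc { const Frame *borrowed; };  // not owned, like Location::SymbolizedLoc

    int main() {
      FrameHolder holder(new Frame{"handleTypeMismatch", nullptr});
      Loc loc{holder.get()};                // valid only while holder is alive
      std::printf("at %s\n", loc.borrowed->function);
    }
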
diff --git a/lib/ubsan/ubsan_flags.cc b/lib/ubsan/ubsan_flags.cc
index eda11f1..0dbffc9 100644
--- a/lib/ubsan/ubsan_flags.cc
+++ b/lib/ubsan/ubsan_flags.cc
@@ -14,6 +14,7 @@
#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
namespace __ubsan {
@@ -21,36 +22,51 @@
return (&__ubsan_default_options) ? __ubsan_default_options() : "";
}
-void InitializeCommonFlags() {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->print_summary = false;
- // Override from user-specified string.
- ParseCommonFlagsFromString(cf, MaybeCallUbsanDefaultOptions());
- // Override from environment variable.
- ParseCommonFlagsFromString(cf, GetEnv("UBSAN_OPTIONS"));
-}
-
Flags ubsan_flags;
-static void ParseFlagsFromString(Flags *f, const char *str) {
- if (!str)
- return;
- ParseFlag(str, &f->halt_on_error, "halt_on_error",
- "Crash the program after printing the first error report");
- ParseFlag(str, &f->print_stacktrace, "print_stacktrace",
- "Include full stacktrace into an error report");
+void Flags::SetDefaults() {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
}
-void InitializeFlags() {
+void RegisterUbsanFlags(FlagParser *parser, Flags *f) {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+}
+
+void InitializeFlags(bool standalone) {
Flags *f = flags();
- // Default values.
- f->halt_on_error = false;
- f->print_stacktrace = false;
+ FlagParser parser;
+ RegisterUbsanFlags(&parser, f);
+
+ if (standalone) {
+ RegisterCommonFlags(&parser);
+
+ SetCommonFlagsDefaults();
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.print_summary = false;
+ OverrideCommonFlags(cf);
+ } else {
+ // Ignore common flags if not standalone.
+ // This is inconsistent with LSan, which allows common flags in LSAN_FLAGS.
+ // This is caused by undefined initialization order between ASan and UBsan,
+ // which makes it impossible to make sure that common flags from ASAN_OPTIONS
+ // have not been used (in __asan_init) before they are overwritten with flags
+ // from UBSAN_OPTIONS.
+ CommonFlags cf_ignored;
+ RegisterCommonFlags(&parser, &cf_ignored);
+ }
+
+ f->SetDefaults();
// Override from user-specified string.
- ParseFlagsFromString(f, MaybeCallUbsanDefaultOptions());
+ parser.ParseString(MaybeCallUbsanDefaultOptions());
// Override from environment variable.
- ParseFlagsFromString(f, GetEnv("UBSAN_OPTIONS"));
+ parser.ParseString(GetEnv("UBSAN_OPTIONS"));
+ SetVerbosity(common_flags()->verbosity);
}
} // namespace __ubsan
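
The flag machinery above is the classic X-macro setup: ubsan_flags.inc lists each flag once, and the header and the .cc expand UBSAN_FLAG differently to declare fields, set defaults, and register parser callbacks. A self-contained sketch of the same idea, with the list written inline instead of in a separate .inc file:

    #include <cstdio>

    // The flag list; in the runtime this lives in ubsan_flags.inc.
    #define DEMO_FLAG_LIST(X)                                          \
      X(bool, halt_on_error, false, "Crash after the first report")    \
      X(bool, print_stacktrace, false, "Include a full stacktrace")

    // Expansion 1: declare one struct field per flag.
    struct Flags {
    #define DEMO_FLAG(Type, Name, Default, Desc) Type Name;
      DEMO_FLAG_LIST(DEMO_FLAG)
    #undef DEMO_FLAG
      void SetDefaults();
    };

    // Expansion 2: assign each flag its default value.
    void Flags::SetDefaults() {
    #define DEMO_FLAG(Type, Name, Default, Desc) Name = Default;
      DEMO_FLAG_LIST(DEMO_FLAG)
    #undef DEMO_FLAG
    }

    int main() {
      Flags f;
      f.SetDefaults();
      std::printf("halt_on_error=%d print_stacktrace=%d\n",
                  f.halt_on_error, f.print_stacktrace);
    }
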
diff --git a/lib/ubsan/ubsan_flags.h b/lib/ubsan/ubsan_flags.h
index c496469..b47f14e 100644
--- a/lib/ubsan/ubsan_flags.h
+++ b/lib/ubsan/ubsan_flags.h
@@ -18,15 +18,17 @@
namespace __ubsan {
struct Flags {
- bool halt_on_error;
- bool print_stacktrace;
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+
+ void SetDefaults();
};
extern Flags ubsan_flags;
inline Flags *flags() { return &ubsan_flags; }
-void InitializeCommonFlags();
-void InitializeFlags();
+void InitializeFlags(bool standalone);
} // namespace __ubsan
diff --git a/lib/ubsan/ubsan_flags.inc b/lib/ubsan/ubsan_flags.inc
new file mode 100644
index 0000000..9ca31d1
--- /dev/null
+++ b/lib/ubsan/ubsan_flags.inc
@@ -0,0 +1,25 @@
+//===-- ubsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// UBSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAG
+# error "Define UBSAN_FLAG prior to including this file!"
+#endif
+
+// UBSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+UBSAN_FLAG(bool, halt_on_error, false,
+ "Crash the program after printing the first error report")
+UBSAN_FLAG(bool, print_stacktrace, false,
+ "Include full stacktrace into an error report")
+UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+
diff --git a/lib/ubsan/ubsan_handlers.cc b/lib/ubsan/ubsan_handlers.cc
index a0ecff9..78e7508 100644
--- a/lib/ubsan/ubsan_handlers.cc
+++ b/lib/ubsan/ubsan_handlers.cc
@@ -37,14 +37,17 @@
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
- Location FallbackLoc, ReportOptions Opts) {
+ ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
// Use the SourceLocation from Data to track deduplication, even if 'invalid'
if (ignoreReport(Loc.getSourceLocation(), Opts))
return;
- if (Data->Loc.isInvalid())
+ SymbolizedStackHolder FallbackLoc;
+ if (Data->Loc.isInvalid()) {
+ FallbackLoc.reset(getCallerLocation(Opts.pc));
Loc = FallbackLoc;
+ }
ScopedReport R(Opts, Loc);
@@ -67,12 +70,12 @@
void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data,
ValueHandle Pointer) {
GET_REPORT_OPTIONS(false);
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
}
void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data,
ValueHandle Pointer) {
GET_REPORT_OPTIONS(true);
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
Die();
}
@@ -288,7 +291,8 @@
static void handleFloatCastOverflow(FloatCastOverflowData *Data,
ValueHandle From, ReportOptions Opts) {
// TODO: Add deduplication once a SourceLocation is generated for this check.
- Location Loc = getCallerLocation();
+ SymbolizedStackHolder CallerLoc(getCallerLocation(Opts.pc));
+ Location Loc = CallerLoc;
ScopedReport R(Opts, Loc);
Diag(Loc, DL_Error,
@@ -337,16 +341,21 @@
static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
ValueHandle Function,
ReportOptions Opts) {
- const char *FName = "(unknown)";
+ SourceLocation CallLoc = Data->Loc.acquire();
+ if (ignoreReport(CallLoc, Opts))
+ return;
- Location Loc = getFunctionLocation(Function, &FName);
+ ScopedReport R(Opts, CallLoc);
- ScopedReport R(Opts, Loc);
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
- Diag(Data->Loc, DL_Error,
+ Diag(CallLoc, DL_Error,
"call to function %0 through pointer to incorrect function type %1")
- << FName << Data->Type;
- Diag(Loc, DL_Note, "%0 defined here") << FName;
+ << FName << Data->Type;
+ Diag(FLoc, DL_Note, "%0 defined here") << FName;
}
void
diff --git a/lib/ubsan/ubsan_handlers_cxx.cc b/lib/ubsan/ubsan_handlers_cxx.cc
index 5704c1e..4718e6e 100644
--- a/lib/ubsan/ubsan_handlers_cxx.cc
+++ b/lib/ubsan/ubsan_handlers_cxx.cc
@@ -36,8 +36,7 @@
// Check if error report should be suppressed.
DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
- if (DTI.isValid() &&
- MatchSuppression(DTI.getMostDerivedTypeName(), SuppressionVptrCheck))
+ if (DTI.isValid() && IsVptrCheckSuppressed(DTI.getMostDerivedTypeName()))
return;
SourceLocation Loc = Data->Loc.acquire();
diff --git a/lib/ubsan/ubsan_init.cc b/lib/ubsan/ubsan_init.cc
index 6080e30..219273d 100644
--- a/lib/ubsan/ubsan_init.cc
+++ b/lib/ubsan/ubsan_init.cc
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
+#include "ubsan_diag.h"
#include "ubsan_init.h"
#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
-#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __ubsan;
@@ -31,6 +31,7 @@
#endif
if (LIKELY(ubsan_inited))
return;
+ bool standalone = false;
if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) {
// WARNING: If this condition holds, then either UBSan runs in a standalone
// mode, or the initializer for another sanitizer hasn't run yet. In the latter
@@ -38,11 +39,12 @@
// common flags. It means, that we are not allowed to *use* common flags
// in this function.
SanitizerToolName = "UndefinedBehaviorSanitizer";
- InitializeCommonFlags();
+ standalone = true;
}
// Initialize UBSan-specific flags.
- InitializeFlags();
- SuppressionContext::InitIfNecessary();
+ InitializeFlags(standalone);
+ InitializeSuppressions();
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
ubsan_inited = true;
}
diff --git a/lib/ubsan/ubsan_type_hash.cc b/lib/ubsan/ubsan_type_hash.cc
index 808a433..a388bcc 100644
--- a/lib/ubsan/ubsan_type_hash.cc
+++ b/lib/ubsan/ubsan_type_hash.cc
@@ -115,8 +115,7 @@
/// \brief Determine whether \p Derived has a \p Base base class subobject at
/// offset \p Offset.
-static bool isDerivedFromAtOffset(sptr Object,
- const abi::__class_type_info *Derived,
+static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
const abi::__class_type_info *Base,
sptr Offset) {
if (Derived->__type_name == Base->__type_name)
@@ -124,7 +123,7 @@
if (const abi::__si_class_type_info *SI =
dynamic_cast<const abi::__si_class_type_info*>(Derived))
- return isDerivedFromAtOffset(Object, SI->__base_type, Base, Offset);
+ return isDerivedFromAtOffset(SI->__base_type, Base, Offset);
const abi::__vmi_class_type_info *VTI =
dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
@@ -139,13 +138,13 @@
sptr OffsetHere = VTI->base_info[base].__offset_flags >>
abi::__base_class_type_info::__offset_shift;
if (VTI->base_info[base].__offset_flags &
- abi::__base_class_type_info::__virtual_mask) {
- sptr VTable = *reinterpret_cast<const sptr *>(Object);
- OffsetHere = *reinterpret_cast<const sptr *>(VTable + OffsetHere);
- }
- if (isDerivedFromAtOffset(Object + OffsetHere,
- VTI->base_info[base].__base_type, Base,
- Offset - OffsetHere))
+ abi::__base_class_type_info::__virtual_mask)
+ // For now, just punt on virtual bases and say 'yes'.
+ // FIXME: OffsetHere is the offset in the vtable of the virtual base
+ // offset. Read the vbase offset out of the vtable and use it.
+ return true;
+ if (isDerivedFromAtOffset(VTI->base_info[base].__base_type,
+ Base, Offset - OffsetHere))
return true;
}
@@ -154,15 +153,14 @@
/// \brief Find the derived-most dynamic base class of \p Derived at offset
/// \p Offset.
-static const abi::__class_type_info *
-findBaseAtOffset(sptr Object, const abi::__class_type_info *Derived,
- sptr Offset) {
+static const abi::__class_type_info *findBaseAtOffset(
+ const abi::__class_type_info *Derived, sptr Offset) {
if (!Offset)
return Derived;
if (const abi::__si_class_type_info *SI =
dynamic_cast<const abi::__si_class_type_info*>(Derived))
- return findBaseAtOffset(Object, SI->__base_type, Offset);
+ return findBaseAtOffset(SI->__base_type, Offset);
const abi::__vmi_class_type_info *VTI =
dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
@@ -174,13 +172,12 @@
sptr OffsetHere = VTI->base_info[base].__offset_flags >>
abi::__base_class_type_info::__offset_shift;
if (VTI->base_info[base].__offset_flags &
- abi::__base_class_type_info::__virtual_mask) {
- sptr VTable = *reinterpret_cast<const sptr *>(Object);
- OffsetHere = *reinterpret_cast<const sptr *>(VTable + OffsetHere);
- }
- if (const abi::__class_type_info *Base = findBaseAtOffset(
- Object + OffsetHere, VTI->base_info[base].__base_type,
- Offset - OffsetHere))
+ abi::__base_class_type_info::__virtual_mask)
+ // FIXME: Can't handle virtual bases yet.
+ continue;
+ if (const abi::__class_type_info *Base =
+ findBaseAtOffset(VTI->base_info[base].__base_type,
+ Offset - OffsetHere))
return Base;
}
@@ -232,8 +229,7 @@
return false;
abi::__class_type_info *Base = (abi::__class_type_info*)Type;
- if (!isDerivedFromAtOffset(reinterpret_cast<sptr>(Object), Derived, Base,
- -Vtable->Offset))
+ if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset))
return false;
// Success. Cache this result.
@@ -247,9 +243,8 @@
if (!Vtable)
return DynamicTypeInfo(0, 0, 0);
const abi::__class_type_info *ObjectType = findBaseAtOffset(
- reinterpret_cast<sptr>(Object),
- static_cast<const abi::__class_type_info *>(Vtable->TypeInfo),
- -Vtable->Offset);
+ static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
+ -Vtable->Offset);
return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
ObjectType ? ObjectType->__type_name : "<unknown>");
}
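
The removed lines above read the virtual base offset out of the vtable (dereference the object's vptr, then the slot at OffsetHere); the replacement punts because that offset depends on the most-derived object, which the purely type-based walk no longer has. A small stand-alone illustration of why the offset cannot be computed from the types alone:

    #include <cstdio>
    #include <cstddef>

    struct VB { virtual ~VB() {} int vb = 0; };
    struct A : virtual VB { int a = 0; };
    struct B : virtual VB { int b = 0; };
    struct C : A, B { int c = 0; };

    template <class D> std::ptrdiff_t VBaseOffset() {
      D d;
      // Distance from the A subobject to the shared virtual base VB.
      return reinterpret_cast<char *>(static_cast<VB *>(&d)) -
             reinterpret_cast<char *>(static_cast<A *>(&d));
    }

    int main() {
      // The same A subobject sees its virtual base at different offsets
      // depending on whether the complete object is an A or a C.
      std::printf("VB offset within A-subobject: in A=%td, in C=%td\n",
                  VBaseOffset<A>(), VBaseOffset<C>());
    }
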