Revert to C-style callbacks for iteration over allocator chunks.

Also clean up LSan code, fix some comments, and replace void* with uptr
to reduce the number of reinterpret_casts.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@184700 91177308-0d34-0410-b5e6-96231b3b80d8
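
For context, the shape of the change is the usual functor-to-function-pointer
conversion: state that previously lived in a callback object's member field now
travels through an opaque void* argument. The following is a minimal standalone
sketch of that pattern; the uptr, Frontier, and ForEachChunkCallback names
mirror the ones in the diff, while the toy allocator and main() are
hypothetical and not part of compiler-rt.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef std::uintptr_t uptr;         // stand-in for the sanitizer's uptr
  typedef std::vector<uptr> Frontier;  // stand-in for __lsan::Frontier

  // C-style callback type, matching the shape used in this patch:
  // the chunk address plus an opaque state pointer.
  typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

  // Hypothetical toy allocator: just a fixed list of chunk addresses.
  struct ToyAllocator {
    uptr chunks[3] = {0x1000, 0x2000, 0x3000};
    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
      for (uptr c : chunks) callback(c, arg);  // no templates, no functors
    }
  };

  // Callback in the new style: per-call state arrives via 'arg'
  // instead of a member field of a callable object.
  static void CollectAllCb(uptr chunk, void *arg) {
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }

  int main() {
    ToyAllocator allocator;
    Frontier frontier;
    // Old style would have been: allocator.ForEachChunk(CollectAllCb(&frontier));
    allocator.ForEachChunk(CollectAllCb, &frontier);
    for (uptr c : frontier)
      std::printf("chunk at 0x%zx\n", static_cast<size_t>(c));
    return 0;
  }
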
diff --git a/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc b/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
index 2081c61..94e4fc3 100644
--- a/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
+++ b/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
@@ -12,7 +12,7 @@
pthread_key_t key;
-void key_destructor(void *) {
+void key_destructor(void *arg) {
__lsan::ScopedDisabler d;
void *p = malloc(1337);
// Break optimization.
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 2bdb4a2..1512c2e 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -52,7 +52,7 @@
}
static ChunkMetadata *Metadata(void *p) {
- return (ChunkMetadata *)allocator.GetMetaData(p);
+ return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
@@ -62,14 +62,14 @@
m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
m->requested_size = size;
- atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
- atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
@@ -129,25 +129,26 @@
*end = *begin + sizeof(allocator);
}
-void *PointsIntoChunk(void* p) {
- void *chunk = allocator.GetBlockBeginFastLocked(p);
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
- if (p < chunk) return 0;
- ChunkMetadata *m = Metadata(chunk);
+ if (addr < chunk) return 0;
+ ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
CHECK(m);
- if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
+ if (m->allocated && addr < chunk + m->requested_size)
return chunk;
return 0;
}
-void *GetUserBegin(void *p) {
- return p;
+uptr GetUserBegin(uptr chunk) {
+ return chunk;
}
-LsanMetadata::LsanMetadata(void *chunk) {
- metadata_ = Metadata(chunk);
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
}
@@ -171,20 +172,10 @@
return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}
-template<typename Callable>
-void ForEachChunk(Callable const &callback) {
- allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ allocator.ForEachChunk(callback, arg);
}
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
- ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
- MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
- CollectIgnoredCb const &callback);
-
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index ed1adb2..5e936cd 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -23,7 +23,7 @@
#if CAN_SANITIZE_LEAKS
namespace __lsan {
-// This mutex is used to prevent races between DoLeakCheck and SuppressObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);
THREADLOCAL int disable_counter;
@@ -84,12 +84,12 @@
#endif
}
-// Scan the memory range, looking for byte patterns that point into allocator
-// chunks. Mark those chunks with tag and add them to the frontier.
-// There are two usage modes for this function: finding reachable or ignored
-// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
-// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
-// so frontier = 0.
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable or ignored
+// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
@@ -99,10 +99,10 @@
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
- for (; pp + sizeof(void *) <= end; pp += alignment) {
+ for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT
void *p = *reinterpret_cast<void**>(pp);
if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
- void *chunk = PointsIntoChunk(p);
+ uptr chunk = PointsIntoChunk(p);
if (!chunk) continue;
LsanMetadata m(chunk);
// Reachable beats ignored beats leaked.
@@ -111,14 +111,13 @@
m.set_tag(tag);
if (flags()->log_pointers)
Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
- m.requested_size());
+ chunk, chunk + m.requested_size(), m.requested_size());
if (frontier)
- frontier->push_back(reinterpret_cast<uptr>(chunk));
+ frontier->push_back(chunk);
}
}
-// Scan thread data (stacks and TLS) for heap pointers.
+// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
@@ -191,31 +190,34 @@
while (frontier->size()) {
uptr next_chunk = frontier->back();
frontier->pop_back();
- LsanMetadata m(reinterpret_cast<void *>(next_chunk));
+ LsanMetadata m(next_chunk);
ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
"HEAP", tag);
}
}
-// Mark leaked chunks which are reachable from other leaked chunks.
-void MarkIndirectlyLeakedCb::operator()(void *p) const {
- p = GetUserBegin(p);
- LsanMetadata m(p);
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
- ScanRangeForPointers(reinterpret_cast<uptr>(p),
- reinterpret_cast<uptr>(p) + m.requested_size(),
+ ScanRangeForPointers(chunk, chunk + m.requested_size(),
/* frontier */ 0, "HEAP", kIndirectlyLeaked);
}
}
-void CollectIgnoredCb::operator()(void *p) const {
- p = GetUserBegin(p);
- LsanMetadata m(p);
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
if (m.allocated() && m.tag() == kIgnored)
- frontier_->push_back(reinterpret_cast<uptr>(p));
+ reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}
-// Set the appropriate tag on each chunk.
+// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
Frontier frontier(GetPageSizeCached());
@@ -233,14 +235,14 @@
if (flags()->log_pointers)
Report("Scanning ignored chunks.\n");
CHECK_EQ(0, frontier.size());
- ForEachChunk(CollectIgnoredCb(&frontier));
+ ForEachChunk(CollectIgnoredCb, &frontier);
FloodFillTag(&frontier, kIgnored);
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
if (flags()->log_pointers)
Report("Scanning leaked chunks.\n");
- ForEachChunk(MarkIndirectlyLeakedCb());
+ ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}
static void PrintStackTraceById(u32 stack_trace_id) {
@@ -251,9 +253,12 @@
common_flags()->strip_path_prefix, 0);
}
-void CollectLeaksCb::operator()(void *p) const {
- p = GetUserBegin(p);
- LsanMetadata m(p);
+// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
@@ -261,33 +266,29 @@
uptr size = 0;
const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
size = Min(size, resolution);
- leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
- m.tag());
+ leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
} else {
- leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+ leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
}
}
}
-static void CollectLeaks(LeakReport *leak_report) {
- ForEachChunk(CollectLeaksCb(leak_report));
-}
-
-void PrintLeakedCb::operator()(void *p) const {
- p = GetUserBegin(p);
- LsanMetadata m(p);
+// ForEachChunk callback. Prints addresses of unreachable chunks.
+static void PrintLeakedCb(uptr chunk, void *arg) {
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
Printf("%s leaked %zu byte object at %p.\n",
m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
- m.requested_size(), p);
+ m.requested_size(), chunk);
}
}
static void PrintLeaked() {
Printf("\n");
Printf("Reporting individual objects:\n");
- ForEachChunk(PrintLeakedCb());
+ ForEachChunk(PrintLeakedCb, 0 /* arg */);
}
struct DoLeakCheckParam {
@@ -302,7 +303,7 @@
CHECK(!param->success);
CHECK(param->leak_report.IsEmpty());
ClassifyAllChunks(suspended_threads);
-  CollectLeaks(&param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
if (!param->leak_report.IsEmpty() && flags()->report_objects)
PrintLeaked();
param->success = true;
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 0d90acf..e5f0a22 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -15,6 +15,7 @@
#ifndef LSAN_COMMON_H
#define LSAN_COMMON_H
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
@@ -105,55 +106,6 @@
Frontier *frontier,
const char *region_type, ChunkTag tag);
-// Callables for iterating over chunks. Those classes are used as template
-// parameters in ForEachChunk, so we must expose them here to allow for explicit
-// template instantiation.
-
-// Identifies unreachable chunks which must be treated as reachable. Marks them
-// as reachable and adds them to the frontier.
-class ProcessPlatformSpecificAllocationsCb {
- public:
- explicit ProcessPlatformSpecificAllocationsCb(
- Frontier *frontier)
- : frontier_(frontier) {}
- void operator()(void *p) const;
- private:
- Frontier *frontier_;
-};
-
-// Prints addresses of unreachable chunks.
-class PrintLeakedCb {
- public:
- void operator()(void *p) const;
-};
-
-// Aggregates unreachable chunks into a LeakReport.
-class CollectLeaksCb {
- public:
- explicit CollectLeaksCb(LeakReport *leak_report)
- : leak_report_(leak_report) {}
- void operator()(void *p) const;
- private:
- LeakReport *leak_report_;
-};
-
-// Scans each leaked chunk for pointers to other leaked chunks, and marks each
-// of them as indirectly leaked.
-class MarkIndirectlyLeakedCb {
- public:
- void operator()(void *p) const;
-};
-
-// Finds all chunk marked as kIgnored and adds their addresses to frontier.
-class CollectIgnoredCb {
- public:
- explicit CollectIgnoredCb(Frontier *frontier)
- : frontier_(frontier) {}
- void operator()(void *p) const;
- private:
- Frontier *frontier_;
-};
-
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
kIgnoreObjectAlreadyIgnored,
@@ -167,8 +119,8 @@
// The following must be implemented in the parent tool.
-template<typename Callable> void ForEachChunk(Callable const &callback);
-// The address range occupied by the global allocator object.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
void GetAllocatorGlobalRange(uptr *begin, uptr *end);
// Wrappers for allocator's ForceLock()/ForceUnlock().
void LockAllocator();
@@ -179,18 +131,18 @@
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end,
uptr *cache_begin, uptr *cache_end);
-// If p points into a chunk that has been allocated to the user, return its
-// user-visible address. Otherwise, return 0.
-void *PointsIntoChunk(void *p);
-// Return address of user-visible chunk contained in this allocator chunk.
-void *GetUserBegin(void *p);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
- // Constructor accepts pointer to user-visible chunk.
- explicit LsanMetadata(void *chunk);
+ // Constructor accepts address of user-visible chunk.
+ explicit LsanMetadata(uptr chunk);
bool allocated() const;
ChunkTag tag() const;
void set_tag(ChunkTag value);
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index 3ce0ea4..08a0595 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -53,8 +53,7 @@
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
void *data) {
- Frontier *frontier =
- reinterpret_cast<Frontier *>(data);
+ Frontier *frontier = reinterpret_cast<Frontier *>(data);
for (uptr j = 0; j < info->dlpi_phnum; j++) {
const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
// We're looking for .data and .bss sections, which reside in writeable,
@@ -82,7 +81,7 @@
return 0;
}
-// Scan global variables for heap pointers.
+// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
// FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
// deadlocking by running this under StopTheWorld. However, the lock is
@@ -101,23 +100,26 @@
return 0;
}
-void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const {
- p = GetUserBegin(p);
- LsanMetadata m(p);
+// ForEachChunk callback. Identifies unreachable chunks which must be treated as
+// reachable. Marks them as reachable and adds them to the frontier.
+static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
m.set_tag(kReachable);
- frontier_->push_back(reinterpret_cast<uptr>(p));
+ reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}
}
}
-// Handle dynamically allocated TLS blocks by treating all chunks allocated from
-// ld-linux.so as reachable.
+// Handles dynamically allocated TLS blocks by treating all chunks allocated
+// from ld-linux.so as reachable.
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
if (!flags()->use_tls) return;
if (!linker) return;
- ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier));
+ ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
}
} // namespace __lsan