tsan: allow memory overlap in __tsan_java_move
The JVM actually moves memory between overlapping ranges, so __tsan_java_move must not assume that src and dst are disjoint.
llvm-svn: 212560
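
For context, a compacting collector typically slides live objects by a delta smaller than the region being moved, so the source and destination ranges overlap. A minimal sketch of the caller's side under that assumption; SlideRegion and its parameters are illustrative, only __tsan_java_move and the jptr typedef come from tsan_interface_java.h:

  #include <cstring>

  typedef unsigned long jptr;  // mirrors the typedef in tsan_interface_java.h
  extern "C" void __tsan_java_move(jptr src, jptr dst, jptr size);

  // Hypothetical stop-the-world step: slide a live region down by 'gap' bytes.
  // Whenever gap < size, the destination range overlaps the source range.
  void SlideRegion(char *src, jptr size, jptr gap) {
    char *dst = src - gap;
    memmove(dst, src, size);                       // move the object bytes
    __tsan_java_move((jptr)src, (jptr)dst, size);  // keep shadow/meta in sync
  }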
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc
index e63b93f..5dfb476 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cc
@@ -126,7 +126,8 @@
   CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
   CHECK_GE(dst, jctx->heap_begin);
   CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
-  CHECK(dst >= src + size || src >= dst + size);
+  CHECK_NE(dst, src);
+  CHECK_NE(size, 0);
 
   // Assuming it's not running concurrently with threads that do
   // memory accesses and mutex operations (stop-the-world phase).
@@ -136,7 +137,14 @@
   u64 *s = (u64*)MemToShadow(src);
   u64 *d = (u64*)MemToShadow(dst);
   u64 *send = (u64*)MemToShadow(src + size);
-  for (; s != send; s++, d++) {
+  uptr inc = 1;
+  if (dst > src) {
+    s = (u64*)MemToShadow(src + size) - 1;
+    d = (u64*)MemToShadow(dst + size) - 1;
+    send = (u64*)MemToShadow(src) - 1;
+    inc = -1;
+  }
+  for (; s != send; s += inc, d += inc) {
     *d = *s;
     *s = 0;
   }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.h b/compiler-rt/lib/tsan/rtl/tsan_interface_java.h
index 6a83885..1f793df 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.h
@@ -50,7 +50,7 @@
 void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
 // Callback for memory move by GC.
 // Can be aggregated for several objects (preferably).
-// The ranges must not overlap.
+// The ranges can overlap.
 void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
 // This function must be called on the finalizer thread
 // before executing a batch of finalizers.
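
Since the comment also encourages aggregating the callback over several objects, here is a hedged sketch of how a GC might batch it; the Move struct and FlushMoves are illustrative, only __tsan_java_move comes from this header:

  typedef unsigned long jptr;  // mirrors the typedef in tsan_interface_java.h
  extern "C" void __tsan_java_move(jptr src, jptr dst, jptr size);

  struct Move { jptr src, dst, size; };

  // Coalesce moves of adjacent objects that shift by the same delta, then
  // emit one __tsan_java_move per merged (and possibly overlapping) range.
  void FlushMoves(const Move *moves, int n) {
    for (int i = 0; i < n;) {
      Move m = moves[i++];
      while (i < n && moves[i].src == m.src + m.size &&
             moves[i].dst == m.dst + m.size)
        m.size += moves[i++].size;
      __tsan_java_move(m.src, m.dst, m.size);
    }
  }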
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cc b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
index 3462b04..15392c9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cc
@@ -180,13 +180,22 @@
 }
 
 void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
-  // Here we assume that src and dst do not overlap,
-  // and there are no concurrent accesses to the regions (e.g. stop-the-world).
+  // src and dst can overlap,
+  // there are no concurrent accesses to the regions (e.g. stop-the-world).
+  CHECK_NE(src, dst);
+  CHECK_NE(sz, 0);
   uptr diff = dst - src;
   u32 *src_meta = MemToMeta(src);
   u32 *dst_meta = MemToMeta(dst);
   u32 *src_meta_end = MemToMeta(src + sz);
-  for (; src_meta != src_meta_end; src_meta++, dst_meta++) {
+  uptr inc = 1;
+  if (dst > src) {
+    src_meta = MemToMeta(src + sz) - 1;
+    dst_meta = MemToMeta(dst + sz) - 1;
+    src_meta_end = MemToMeta(src) - 1;
+    inc = -1;
+  }
+  for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
     CHECK_EQ(*dst_meta, 0);
     u32 idx = *src_meta;
     *src_meta = 0;
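
A small self-check of the new behaviour (independent of the runtime; the cell values and indices are arbitrary): slide four cells up by two, iterating backwards and zeroing the source, then verify the result.

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t cells[8] = {10, 11, 12, 13, 0, 0, 0, 0};
    // dst (index 2) is above src (index 0): iterate from the top cell down so
    // every source cell is read before the move overwrites it.
    for (int i = 3; i >= 0; i--) {
      cells[2 + i] = cells[i];
      cells[i] = 0;
    }
    const uint64_t want[8] = {0, 0, 10, 11, 12, 13, 0, 0};
    for (int i = 0; i < 8; i++)
      assert(cells[i] == want[i]);
    return 0;
  }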