Modified the bitmap implementation so that more expressions can be evaluated at compile time.
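
The aligned load/store helpers now take the access size as a SizeT argument instead of an
end address, so call sites such as bm_access_load_4() can pass a literal constant (1, 2, 4
or 8) and the expressions that previously had to be derived from a2 - a1 at run time can be
folded by the compiler. Below is a minimal, self-contained sketch of that pattern; the
ADDR0_BITS value, the bitmap1 layout and the set_range_old()/set_range_new() helpers are
simplified stand-ins for illustration, not the actual DRD code.

  /* Minimal sketch of passing a constant size instead of an end address.
   * Types and constants here are illustrative, not the DRD implementation. */
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  #define ADDR0_BITS 16
  #define ADDR0_MASK ((1UL << ADDR0_BITS) - 1UL)

  typedef uintptr_t Addr;
  typedef size_t SizeT;

  /* Toy second-level access bitmap: one byte per address. */
  struct bitmap1 { unsigned char bm0_r[1UL << ADDR0_BITS]; };

  /* Old style: the helper receives two addresses and recomputes the
   * access size from (a2 - a1) at run time. */
  static inline void set_range_old(struct bitmap1* bm1, Addr a1, Addr a2)
  {
      for (Addr a = a1 & ADDR0_MASK; a <= ((a2 - 1) & ADDR0_MASK); a++)
          bm1->bm0_r[a] = 1;
  }

  /* New style: the caller passes the size directly.  When the call site
   * uses a literal (1, 2, 4 or 8), the compiler can fold the size into the
   * inlined body instead of deriving it from a2 - a1. */
  static inline void set_range_new(struct bitmap1* bm1, Addr a1, SizeT size)
  {
      const Addr a0 = a1 & ADDR0_MASK;
      for (SizeT i = 0; i < size; i++)
          bm1->bm0_r[a0 + i] = 1;
  }

  int main(void)
  {
      static struct bitmap1 bm1;
      const Addr a = 0x1230;

      set_range_old(&bm1, a, a + 4);   /* size recomputed as a2 - a1 */
      set_range_new(&bm1, a, 4);       /* size is a compile-time constant */

      printf("byte at offset 0x%lx: %d\n",
             (unsigned long)(a & ADDR0_MASK), bm1.bm0_r[a & ADDR0_MASK]);
      return 0;
  }
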
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@7688 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/exp-drd/drd_bitmap.c b/exp-drd/drd_bitmap.c
index d56062f..3ff24d7 100644
--- a/exp-drd/drd_bitmap.c
+++ b/exp-drd/drd_bitmap.c
@@ -136,44 +136,26 @@
static inline
void bm_access_aligned_load(struct bitmap* const bm,
- const Addr a1, const Addr a2)
+ const Addr a1, const SizeT size)
{
struct bitmap2* bm2;
-#if 0
- /* Commented out the statements below because of performance reasons. */
- tl_assert(bm);
- tl_assert(a1 < a2);
- tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
- || (a2 - a1) == 4 || (a2 - a1) == 8);
- tl_assert((a1 & (a2 - a1 - 1)) == 0);
-#endif
-
bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
tl_assert(bm2);
- bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+ bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size);
}
static inline
void bm_access_aligned_store(struct bitmap* const bm,
- const Addr a1, const Addr a2)
+ const Addr a1, const SizeT size)
{
struct bitmap2* bm2;
-#if 0
- /* Commented out the statements below because of performance reasons. */
- tl_assert(bm);
- tl_assert(a1 < a2);
- tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
- || (a2 - a1) == 4 || (a2 - a1) == 8);
- tl_assert((a1 & (a2 - a1 - 1)) == 0);
-#endif
-
bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
tl_assert(bm2);
- bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+ bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size);
}
void bm_access_range_load(struct bitmap* const bm,
@@ -184,13 +166,13 @@
void bm_access_load_1(struct bitmap* const bm, const Addr a1)
{
- bm_access_aligned_load(bm, a1, a1 + 1);
+ bm_access_aligned_load(bm, a1, 1);
}
void bm_access_load_2(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 1) == 0)
- bm_access_aligned_load(bm, a1, a1 + 2);
+ bm_access_aligned_load(bm, a1, 2);
else
bm_access_range(bm, a1, a1 + 2, eLoad);
}
@@ -198,7 +180,7 @@
void bm_access_load_4(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 3) == 0)
- bm_access_aligned_load(bm, a1, a1 + 4);
+ bm_access_aligned_load(bm, a1, 4);
else
bm_access_range(bm, a1, a1 + 4, eLoad);
}
@@ -206,11 +188,11 @@
void bm_access_load_8(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 7) == 0)
- bm_access_aligned_load(bm, a1, a1 + 8);
+ bm_access_aligned_load(bm, a1, 8);
else if ((a1 & 3) == 0)
{
- bm_access_aligned_load(bm, a1 + 0, a1 + 4);
- bm_access_aligned_load(bm, a1 + 4, a1 + 8);
+ bm_access_aligned_load(bm, a1 + 0, 4);
+ bm_access_aligned_load(bm, a1 + 4, 4);
}
else
bm_access_range(bm, a1, a1 + 8, eLoad);
@@ -218,13 +200,13 @@
void bm_access_store_1(struct bitmap* const bm, const Addr a1)
{
- bm_access_aligned_store(bm, a1, a1 + 1);
+ bm_access_aligned_store(bm, a1, 1);
}
void bm_access_store_2(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 1) == 0)
- bm_access_aligned_store(bm, a1, a1 + 2);
+ bm_access_aligned_store(bm, a1, 2);
else
bm_access_range(bm, a1, a1 + 2, eStore);
}
@@ -232,7 +214,7 @@
void bm_access_store_4(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 3) == 0)
- bm_access_aligned_store(bm, a1, a1 + 4);
+ bm_access_aligned_store(bm, a1, 4);
else
bm_access_range(bm, a1, a1 + 4, eStore);
}
@@ -240,11 +222,11 @@
void bm_access_store_8(struct bitmap* const bm, const Addr a1)
{
if ((a1 & 7) == 0)
- bm_access_aligned_store(bm, a1, a1 + 8);
+ bm_access_aligned_store(bm, a1, 8);
else if ((a1 & 3) == 0)
{
- bm_access_aligned_store(bm, a1 + 0, a1 + 4);
- bm_access_aligned_store(bm, a1 + 4, a1 + 8);
+ bm_access_aligned_store(bm, a1 + 0, 4);
+ bm_access_aligned_store(bm, a1 + 4, 4);
}
else
bm_access_range(bm, a1, a1 + 8, eStore);
@@ -531,52 +513,27 @@
static inline
Bool bm_aligned_load_has_conflict_with(const struct bitmap* const bm,
- const Addr a1, const Addr a2)
+ const Addr a1, const SizeT size)
{
struct bitmap2* bm2;
-#if 0
- /* Commented out the statements below because of performance reasons. */
- tl_assert(bm);
- tl_assert(a1 < a2);
- tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
- || (a2 - a1) == 4 || (a2 - a1) == 8);
- tl_assert((a1 & (a2 - a1 - 1)) == 0);
-#endif
-
bm2 = bm_lookup(bm, a1);
- if (bm2
- && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
- {
- return True;
- }
- return False;
+ return (bm2 && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size));
}
static inline
Bool bm_aligned_store_has_conflict_with(const struct bitmap* const bm,
- const Addr a1, const Addr a2)
+ const Addr a1, const SizeT size)
{
struct bitmap2* bm2;
-#if 0
- /* Commented out the statements below because of performance reasons. */
- tl_assert(bm);
- tl_assert(a1 < a2);
- tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
- || (a2 - a1) == 4 || (a2 - a1) == 8);
- tl_assert((a1 & (a2 - a1 - 1)) == 0);
-#endif
-
bm2 = bm_lookup(bm, a1);
if (bm2)
{
- const struct bitmap1* const p1 = &bm2->bm1;
-
- if (bm0_is_any_set(p1->bm0_r, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK)
- | bm0_is_any_set(p1->bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
+ if (bm0_is_any_set(bm2->bm1.bm0_r, a1 & ADDR0_MASK, size)
+ | bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, size))
{
return True;
}
@@ -592,13 +549,13 @@
Bool bm_load_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
- return bm_aligned_load_has_conflict_with(bm, a1, a1 + 1);
+ return bm_aligned_load_has_conflict_with(bm, a1, 1);
}
Bool bm_load_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 1) == 0)
- return bm_aligned_load_has_conflict_with(bm, a1, a1 + 2);
+ return bm_aligned_load_has_conflict_with(bm, a1, 2);
else
return bm_has_conflict_with(bm, a1, a1 + 2, eLoad);
}
@@ -606,7 +563,7 @@
Bool bm_load_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 3) == 0)
- return bm_aligned_load_has_conflict_with(bm, a1, a1 + 4);
+ return bm_aligned_load_has_conflict_with(bm, a1, 4);
else
return bm_has_conflict_with(bm, a1, a1 + 4, eLoad);
}
@@ -614,20 +571,20 @@
Bool bm_load_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 7) == 0)
- return bm_aligned_load_has_conflict_with(bm, a1, a1 + 8);
+ return bm_aligned_load_has_conflict_with(bm, a1, 8);
else
return bm_has_conflict_with(bm, a1, a1 + 8, eLoad);
}
Bool bm_store_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
- return bm_aligned_store_has_conflict_with(bm, a1, a1 + 1);
+ return bm_aligned_store_has_conflict_with(bm, a1, 1);
}
Bool bm_store_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 1) == 0)
- return bm_aligned_store_has_conflict_with(bm, a1, a1 + 2);
+ return bm_aligned_store_has_conflict_with(bm, a1, 2);
else
return bm_has_conflict_with(bm, a1, a1 + 2, eStore);
}
@@ -635,7 +592,7 @@
Bool bm_store_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 3) == 0)
- return bm_aligned_store_has_conflict_with(bm, a1, a1 + 4);
+ return bm_aligned_store_has_conflict_with(bm, a1, 4);
else
return bm_has_conflict_with(bm, a1, a1 + 4, eStore);
}
@@ -643,7 +600,7 @@
Bool bm_store_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
{
if ((a1 & 7) == 0)
- return bm_aligned_store_has_conflict_with(bm, a1, a1 + 8);
+ return bm_aligned_store_has_conflict_with(bm, a1, 8);
else
return bm_has_conflict_with(bm, a1, a1 + 8, eStore);
}