/*
   This file is part of drd, a data race detector.

   Copyright (C) 2006-2008 Bart Van Assche
   bart.vanassche@gmail.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/


#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_debuginfo.h"   // VG_(get_objname)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(memset)
#include "pub_tool_libcprint.h"   // VG_(printf)
#include "pub_tool_machine.h"     // VG_(get_IP)()
#include "pub_tool_mallocfree.h"  // VG_(malloc), VG_(free)
#include "pub_drd_bitmap.h"
#include "drd_bitmap.h"
#include "drd_error.h"
#include "drd_suppression.h"

// Local variables.

static ULong s_bitmap_creation_count;


// Local function declarations.

static void bm2_merge(struct bitmap2* const bm2l,
                      const struct bitmap2* const bm2r);


// Function definitions.

struct bitmap* bm_new()
{
   struct bitmap* bm;

   // If this assert fails, fix the definition of BITS_PER_BITS_PER_UWORD
   // in drd_bitmap.h.
   tl_assert((1 << BITS_PER_BITS_PER_UWORD) == BITS_PER_UWORD);

   bm = VG_(malloc)(sizeof(*bm));
   tl_assert(bm);
   bm->oset = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));

   s_bitmap_creation_count++;

   return bm;
}

void bm_delete(struct bitmap* const bm)
{
   tl_assert(bm);
   VG_(OSetGen_Destroy)(bm->oset);
   VG_(free)(bm);
}
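
/* A minimal usage sketch of the lifecycle implemented in this file
 * (illustrative comment only, not part of the build; callers reach these
 * functions through drd_bitmap.h):
 *
 *    struct bitmap* bm = bm_new();
 *    bm_access_range(bm, addr, 4, eStore);      // record a four-byte store
 *    if (bm_has_1(bm, addr, eStore)) { ... }    // query the recorded access
 *    bm_delete(bm);
 */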

/**
 * Record an access of type access_type at address a in bitmap bm.
 */
static
__inline__
void bm_access_1(struct bitmap* const bm,
                 const Addr a,
                 const BmAccessTypeT access_type)
{
   struct bitmap2* p2;
   struct bitmap1* p1;
   UWord* p0;
   SPLIT_ADDRESS(a);
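   /* SPLIT_ADDRESS(a), presumably defined in drd_bitmap.h, declares the local
    * variables a1 and a0 used below: a1 holds the upper address bits that
    * select the second-level bitmap and a0 the offset within it. The same
    * convention is used in bm_access_4_aligned() below.
    */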

   tl_assert(bm);

   p2 = bm2_lookup_or_insert(bm, a1);
   p1 = &p2->bm1;
   p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
   bm0_set(p0, a0);
}

static
void bm_access_4_nonaligned(struct bitmap* const bm,
                            const Addr a,
                            const BmAccessTypeT access_type)
{
   bm_access_1(bm, a + 0, access_type);
   bm_access_1(bm, a + 1, access_type);
   bm_access_1(bm, a + 2, access_type);
   bm_access_1(bm, a + 3, access_type);
}

static
__inline__
void bm_access_4_aligned(struct bitmap* const bm,
                         const Addr a,
                         const BmAccessTypeT access_type)
{
   struct bitmap2* p2;
   struct bitmap1* p1;
   UWord* p0;
   SPLIT_ADDRESS(a);

   tl_assert(bm);

   p2 = bm2_lookup_or_insert(bm, a1);
   p1 = &p2->bm1;
   p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
   bm0_set(p0, a0+0);
   bm0_set(p0, a0+1);
   bm0_set(p0, a0+2);
   bm0_set(p0, a0+3);
}

/**
 * Record an access of type access_type at addresses a .. a + 3 in bitmap bm.
 */
void bm_access_4(struct bitmap* const bm,
                 const Addr a,
                 const BmAccessTypeT access_type)
{
   tl_assert(bm);
   if ((a & 3) != 0)
   {
      bm_access_4_nonaligned(bm, a, access_type);
   }
   else
   {
      bm_access_4_aligned(bm, a, access_type);
   }
}

/**
 * Record an access of type access_type at addresses a .. a + size - 1 in
 * bitmap bm.
 */
void bm_access_range(struct bitmap* const bm,
                     const Addr a,
                     const SizeT size,
                     const BmAccessTypeT access_type)
{
   tl_assert(bm);
   tl_assert(size > 0);

   if (size == 4)
      bm_access_4(bm, a, access_type);
   else if (size == 1)
      bm_access_1(bm, a, access_type);
   else
   {
      Addr b;
      for (b = a; b != a + size; b++)
      {
         bm_access_1(bm, b, access_type);
      }
   }
}

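/**
 * Return True if an access of type access_type has been recorded in bitmap bm
 * for every address in the range [a1,a2[, and False otherwise.
 */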
Bool bm_has(const struct bitmap* const bm,
            const Addr a1,
            const Addr a2,
            const BmAccessTypeT access_type)
{
   Addr b;
   for (b = a1; b < a2; b++)
   {
      if (! bm_has_1(bm, b, access_type))
      {
         return False;
      }
   }
   return True;
}

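/**
 * Return True if an access of type access_type has been recorded in bitmap bm
 * for at least one address in the range [a1,a2[, and False otherwise.
 */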
Bool bm_has_any(const struct bitmap* const bm,
                const Addr a1,
                const Addr a2,
                const BmAccessTypeT access_type)
{
   Addr b;

   tl_assert(bm);

   for (b = a1; b < a2; b++)
   {
      if (bm_has_1(bm, b, access_type))
      {
         return True;
      }
   }
   return False;
}

/* Return a non-zero value if there is a read access, write access or both */
/* to any of the addresses in the range [ a1, a2 [ in bitmap bm. */
UWord bm_has_any_access(const struct bitmap* const bm,
                        const Addr a1,
                        const Addr a2)
{
   Addr b, b_next;

   tl_assert(bm);

   for (b = a1; b < a2; b = b_next)
   {
      struct bitmap2* bm2 = bm_lookup(bm, b);

      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
      if (b_next > a2)
      {
         b_next = a2;
      }

      if (bm2)
      {
         Addr b_start;
         Addr b_end;
         UWord b0;

         if ((bm2->addr << ADDR0_BITS) < a1)
            b_start = a1;
         else
         if ((bm2->addr << ADDR0_BITS) < a2)
            b_start = (bm2->addr << ADDR0_BITS);
         else
            break;
         tl_assert(a1 <= b_start && b_start <= a2);

         if ((bm2->addr << ADDR0_BITS) + ADDR0_COUNT < a2)
            b_end = (bm2->addr << ADDR0_BITS) + ADDR0_COUNT;
         else
            b_end = a2;
#if 0
         VG_(message)(Vg_DebugMsg,
                      "in 0x%lx 0x%lx / cur 0x%lx 0x%lx / out 0x%lx 0x%lx",
                      a1, a2,
                      (bm2->addr << ADDR0_BITS),
                      (bm2->addr << ADDR0_BITS) + ADDR0_COUNT,
                      b_start, b_end);
#endif
         tl_assert(a1 <= b_end && b_end <= a2);
         tl_assert(b_start < b_end);
         tl_assert((b_start & ADDR0_MASK) <= ((b_end - 1) & ADDR0_MASK));

         for (b0 = b_start & ADDR0_MASK; b0 <= ((b_end - 1) & ADDR0_MASK); b0++)
         {
            const struct bitmap1* const p1 = &bm2->bm1;
            const UWord mask
               = bm0_is_set(p1->bm0_r, b0) | bm0_is_set(p1->bm0_w, b0);
            if (mask)
            {
               return mask;
            }
         }
      }
   }
   return 0;
}

/**
 * Report whether an access of type access_type at address a is recorded in
 * bitmap bm.
 * @return != 0 means true, and == 0 means false.
 */
UWord bm_has_1(const struct bitmap* const bm,
               const Addr a,
               const BmAccessTypeT access_type)
{
   struct bitmap2* p2;
   struct bitmap1* p1;
   UWord* p0;
   const UWord a0 = a & ADDR0_MASK;

   tl_assert(bm);

   p2 = bm_lookup(bm, a);
   if (p2)
   {
      p1 = &p2->bm1;
      p0 = (access_type == eLoad) ? p1->bm0_r : p1->bm0_w;
      return bm0_is_set(p0, a0);
   }
   return 0;
}

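/**
 * Clear both the load and the store bits for the addresses in the range
 * [a1,a2[ in the first-level bitmap bm1. The range must fit within a single
 * UWord of bm1 (see also the assert statements that have been commented out
 * below).
 */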
static __inline__
void bm1_clear(struct bitmap1* const bm1, const Addr a1, const Addr a2)
{
   UWord idx;
   UWord mask;

#if 0
   // The assert statements below have been commented out for performance
   // reasons.
   tl_assert(a1);
   tl_assert(a1 <= a2);
   tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2)
             || UWORD_MSB(a1) == UWORD_MSB(a2 - 1));
#endif

   idx = (a1 & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
   /* mask: a contiguous series of one bits. The lowest bit set is bit */
   /* UWORD_LSB(a1), and the highest bit set is bit UWORD_LSB(a2 - 1). */
   mask = UWORD_LSB(a2) ? bm0_mask(a2) - bm0_mask(a1) : - bm0_mask(a1);
   bm1->bm0_r[idx] &= ~mask;
   bm1->bm0_w[idx] &= ~mask;
}

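/**
 * Clear all access information in bitmap bm: reset the load and store bits of
 * every second-level bitmap to zero. The second-level bitmaps themselves are
 * kept allocated.
 */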
void bm_clear_all(const struct bitmap* const bm)
{
   struct bitmap2* bm2;

   VG_(OSetGen_ResetIter)(bm->oset);

   for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
   {
      struct bitmap1* const bm1 = &bm2->bm1;
      tl_assert(bm1);
      VG_(memset)(&bm1->bm0_r[0], 0, sizeof(bm1->bm0_r));
      VG_(memset)(&bm1->bm0_w[0], 0, sizeof(bm1->bm0_w));
   }
}

#if 1
// New and fast implementation.
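// The loop below visits the address range [a1,a2[ one second-level bitmap at
// a time. Within each second-level bitmap it clears up to three subranges: a
// leading partial UWord via bm1_clear(), a UWord-aligned middle part via
// VG_(memset)(), and a trailing partial UWord again via bm1_clear().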
void bm_clear(const struct bitmap* const bm,
              const Addr a1,
              const Addr a2)
{
   Addr b, b_next;

   tl_assert(bm);
   tl_assert(a1);
   tl_assert(a1 <= a2);

   for (b = a1; b < a2; b = b_next)
   {
      struct bitmap2* const p2 = bm_lookup(bm, b);

      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
      if (b_next > a2)
      {
         b_next = a2;
      }

      if (p2)
      {
         Addr c = b;
         if (UWORD_LSB(c))
         {
            Addr c_next = UWORD_MSB(c) + BITS_PER_UWORD;
            if (c_next > b_next)
               c_next = b_next;
            bm1_clear(&p2->bm1, c, c_next);
            c = c_next;
         }
         if (UWORD_LSB(c) == 0)
         {
            const Addr c_next = UWORD_MSB(b_next);
            tl_assert(UWORD_LSB(c) == 0);
            tl_assert(UWORD_LSB(c_next) == 0);
            tl_assert(c_next <= b_next);
            tl_assert(c <= c_next);
            if (c_next > c)
            {
               UWord idx = (c & ADDR0_MASK) >> BITS_PER_BITS_PER_UWORD;
               VG_(memset)(&p2->bm1.bm0_r[idx], 0, (c_next - c) / 8);
               VG_(memset)(&p2->bm1.bm0_w[idx], 0, (c_next - c) / 8);
               c = c_next;
            }
         }
         if (c != b_next)
         {
            bm1_clear(&p2->bm1, c, b_next);
         }
      }
   }
}
#else
// Old and slow implementation.
void bm_clear(const struct bitmap* const bm,
              const Addr a1,
              const Addr a2)
{
   Addr b, b_next, c;

   tl_assert(bm);
   tl_assert(a1);
   tl_assert(a1 <= a2);

   for (b = a1; b < a2; b = b_next)
   {
      struct bitmap2* const p2 = bm_lookup(bm, b);

      b_next = (b & ~ADDR0_MASK) + ADDR0_COUNT;
      if (b_next > a2)
      {
         b_next = a2;
      }

      if (p2)
      {
         for (c = b; c < b_next; c++)
         {
            const UWord c0 = c & ADDR0_MASK;

            p2->bm1.bm0_r[c0 / (8*sizeof(UWord))]
               &= ~(1UL << (c0 % (8*sizeof(UWord))));
            p2->bm1.bm0_w[c0 / (8*sizeof(UWord))]
               &= ~(1UL << (c0 % (8*sizeof(UWord))));
         }
      }
   }
}
#endif

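/**
 * Report whether an access of type access_type at address a conflicts with
 * any access recorded in bitmap bm: a load conflicts with a recorded store,
 * and a store conflicts with a recorded load or store.
 * @return != 0 means true, and == 0 means false.
 */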
static
__inline__
UWord bm_has_conflict_with_1(const struct bitmap* const bm,
                             const Addr a,
                             const BmAccessTypeT access_type)
{
   struct bitmap2* p2;
   const UWord a0 = a & ADDR0_MASK;

   tl_assert(bm);

   p2 = bm_lookup(bm, a);
   if (p2)
   {
      if (access_type == eLoad)
      {
         return bm0_is_set(p2->bm1.bm0_w, a0);
      }
      else
      {
         tl_assert(access_type == eStore);
         return (bm0_is_set(p2->bm1.bm0_r, a0)
                 | bm0_is_set(p2->bm1.bm0_w, a0));
      }
   }
   return False;
}

/**
 * Return True if an access of type access_type to any address in the range
 * [a1,a2[ conflicts with an access already stored in bitmap bm.
 */
Bool bm_has_conflict_with(const struct bitmap* const bm,
                          const Addr a1,
                          const Addr a2,
                          const BmAccessTypeT access_type)
{
   Addr b;
   for (b = a1; b != a2; b++)
   {
      if (bm_has_conflict_with_1(bm, b, access_type))
      {
         return True;
      }
   }
   return False;
}

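/** Exchange the contents of bitmaps bm1 and bm2 by swapping their ordered sets. */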
void bm_swap(struct bitmap* const bm1, struct bitmap* const bm2)
{
   OSet* const tmp = bm1->oset;
   bm1->oset = bm2->oset;
   bm2->oset = tmp;
}

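/**
 * Merge the access information stored in bitmap rhs into bitmap lhs:
 * afterwards every access recorded in rhs is also recorded in lhs.
 */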
void bm_merge2(struct bitmap* const lhs,
               const struct bitmap* const rhs)
{
   struct bitmap2* bm2l;
   const struct bitmap2* bm2r;

   // First step: allocate any missing bitmaps in *lhs.
   VG_(OSetGen_ResetIter)(rhs->oset);
   for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
   {
      bm2_lookup_or_insert(lhs, bm2r->addr);
   }

   VG_(OSetGen_ResetIter)(lhs->oset);
   VG_(OSetGen_ResetIter)(rhs->oset);

   for ( ; (bm2r = VG_(OSetGen_Next)(rhs->oset)) != 0; )
   {
      do
      {
         bm2l = VG_(OSetGen_Next)(lhs->oset);
         //VG_(message)(Vg_DebugMsg, "0x%x 0x%x", bm2l->addr, bm2r->addr);
      } while (bm2l->addr < bm2r->addr);

      tl_assert(bm2l->addr == bm2r->addr);

      bm2_merge(bm2l, bm2r);
   }
}

/**
 * Report whether there are any RW / WR / WW patterns in lhs and rhs.
 * @param lhs First bitmap.
 * @param rhs Bitmap to be compared with lhs.
 * @return != 0 if there are data races, == 0 if there are none.
 */
int bm_has_races(const struct bitmap* const lhs,
                 const struct bitmap* const rhs)
{
   VG_(OSetGen_ResetIter)(lhs->oset);
   VG_(OSetGen_ResetIter)(rhs->oset);

   for (;;)
   {
      const struct bitmap2* bm2l = VG_(OSetGen_Next)(lhs->oset);
      const struct bitmap2* bm2r = VG_(OSetGen_Next)(rhs->oset);
      const struct bitmap1* bm1l;
      const struct bitmap1* bm1r;
      unsigned k;

      while (bm2l && bm2r && bm2l->addr != bm2r->addr)
      {
         if (bm2l->addr < bm2r->addr)
            bm2l = VG_(OSetGen_Next)(lhs->oset);
         else
            bm2r = VG_(OSetGen_Next)(rhs->oset);
      }
      if (bm2l == 0 || bm2r == 0)
         break;

      bm1l = &bm2l->bm1;
      bm1r = &bm2r->bm1;

      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
      {
         unsigned b;
         for (b = 0; b < BITS_PER_UWORD; b++)
         {
            UWord const access
               = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
               | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
               | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
               | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
            Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
            if (HAS_RACE(access) && ! drd_is_suppressed(a, a + 1))
            {
               return 1;
            }
         }
      }
   }
   return 0;
}

#ifdef OLD_RACE_DETECTION_ALGORITHM
/**
 * Report RW / WR / WW patterns between lhs and rhs.
 * @param tid1 Thread ID of lhs.
 * @param tid2 Thread ID of rhs.
 * @param lhs First bitmap.
 * @param rhs Bitmap to be compared with lhs.
 */
void bm_report_races(const ThreadId tid1,
                     const ThreadId tid2,
                     const struct bitmap* const lhs,
                     const struct bitmap* const rhs)
{
   Addr range_begin = 0;
   Addr range_end = 0;
   UWord range_access = 0;

   VG_(message)(Vg_UserMsg, "Data addresses accessed by both segments:");

   VG_(OSetGen_ResetIter)(lhs->oset);
   VG_(OSetGen_ResetIter)(rhs->oset);

   for (;;)
   {
      const struct bitmap2* bm2l = VG_(OSetGen_Next)(lhs->oset);
      const struct bitmap2* bm2r = VG_(OSetGen_Next)(rhs->oset);
      const struct bitmap1* bm1l;
      const struct bitmap1* bm1r;
      unsigned k;

      while (bm2l && bm2r && bm2l->addr != bm2r->addr)
      {
         if (bm2l->addr < bm2r->addr)
            bm2l = VG_(OSetGen_Next)(lhs->oset);
         else
            bm2r = VG_(OSetGen_Next)(rhs->oset);
      }
      if (bm2l == 0 || bm2r == 0)
         break;

      bm1l = &bm2l->bm1;
      bm1r = &bm2r->bm1;

      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
      {
         unsigned b;
         for (b = 0; b < BITS_PER_UWORD; b++)
         {
            UWord const access
               = ((bm1l->bm0_r[k] & bm0_mask(b)) ? LHS_R : 0)
               | ((bm1l->bm0_w[k] & bm0_mask(b)) ? LHS_W : 0)
               | ((bm1r->bm0_r[k] & bm0_mask(b)) ? RHS_R : 0)
               | ((bm1r->bm0_w[k] & bm0_mask(b)) ? RHS_W : 0);
            Addr const a = MAKE_ADDRESS(bm2l->addr, k * BITS_PER_UWORD | b);
            if (access == range_access)
               range_end = a + 1;
            else
            {
               tl_assert(range_begin < range_end);
               if (HAS_RACE(range_access)
                   && ! drd_is_suppressed(range_begin, range_end))
               {
                  DataRaceInfo dri;
                  dri.tid1 = tid1;
                  dri.tid2 = tid2;
                  dri.range_begin = range_begin;
                  dri.range_end = range_end;
                  dri.range_access = range_access;
                  tl_assert(dri.range_begin < dri.range_end);
#if 0
                  VG_(maybe_record_error)(tid1,
                                          DataRaceErr,
                                          VG_(get_IP)(tid1), // where
                                          "data race",
                                          &dri);
#else
                  drd_report_data_race(&dri);
#endif
               }
               range_access = access;
               range_begin = a;
               range_end = a + 1;
            }
         }
      }
   }
}
#endif

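/**
 * Print all accesses recorded in bitmap bm: one line per address, with 'W'
 * and 'R' flags indicating recorded stores and loads.
 */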
void bm_print(const struct bitmap* const bm)
{
   struct bitmap2* bm2;

   VG_(OSetGen_ResetIter)(bm->oset);

   for ( ; (bm2 = VG_(OSetGen_Next)(bm->oset)) != 0; )
   {
      const struct bitmap1* const bm1 = &bm2->bm1;
      unsigned k;
      for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
      {
         unsigned b;
         for (b = 0; b < BITS_PER_UWORD; b++)
         {
            int const r = bm1->bm0_r[k] & bm0_mask(b);
            int const w = bm1->bm0_w[k] & bm0_mask(b);
            Addr const a = MAKE_ADDRESS(bm2->addr, k * BITS_PER_UWORD | b);
            if (r || w)
            {
               VG_(printf)("0x%08lx %c %c\n",
                           (Addr)(a),
                           w ? 'W' : ' ', r ? 'R' : ' ');
            }
         }
      }
   }
}

ULong bm_get_bitmap_creation_count(void)
{
   return s_bitmap_creation_count;
}

ULong bm_get_bitmap2_creation_count(void)
{
   return s_bitmap2_creation_count;
}

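/**
 * Merge the second-level bitmap bm2r into bm2l by OR-ing the load and store
 * bits. Both arguments must describe the same address range.
 */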
static void bm2_merge(struct bitmap2* const bm2l,
                      const struct bitmap2* const bm2r)
{
   unsigned k;

   tl_assert(bm2l->addr == bm2r->addr);

   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
   {
      bm2l->bm1.bm0_r[k] |= bm2r->bm1.bm0_r[k];
   }
   for (k = 0; k < BITMAP1_UWORD_COUNT; k++)
   {
      bm2l->bm1.bm0_w[k] |= bm2r->bm1.bm0_w[k];
   }
}

#if 0

/* Unit test */
static
struct { Addr address; SizeT size; BmAccessTypeT access_type; }
   s_args[] = {
      { 0, 1, eLoad },
      { 666, 4, eLoad },
      { 667, 2, eStore },
      { 1024, 1, eStore },
      { 0x0000ffff, 1, eLoad },
      { 0x0001ffff, 1, eLoad },
      { 0x00ffffff, 1, eLoad },
      { 0xffffffff, 1, eStore },
   };

void bm_test(void)
{
   struct bitmap* bm;
   struct bitmap* bm2;
   int i, j;

   VG_(printf)("Start of DRD BM unit test.\n");

   bm = bm_new();

   for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
   {
      bm_access_range(bm, s_args[i].address,
                      s_args[i].size, s_args[i].access_type);
   }

   VG_(printf)("Map contents -- should contain 10 addresses:\n");
   bm_print(bm);

   for (i = 0; i < sizeof(s_args)/sizeof(s_args[0]); i++)
   {
      for (j = 0; j < s_args[i].size; j++)
      {
         tl_assert(bm_has_1(bm, s_args[i].address + j, s_args[i].access_type));
      }
   }

   VG_(printf)("Merge result:\n");
   bm2 = bm_merge(bm, bm);
   bm_print(bm);

   bm_delete(bm);
   bm_delete(bm2);

   VG_(printf)("End of DRD BM unit test.\n");
}
#endif


/*
 * Local variables:
 * c-basic-offset: 3
 * End:
 */