/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/
24
25
26#include "drd_vc.h"
27#include "pub_tool_basics.h" // Addr, SizeT
28#include "pub_tool_libcassert.h" // tl_assert()
29#include "pub_tool_libcbase.h" // VG_(memset), VG_(memmove)
30#include "pub_tool_libcprint.h" // VG_(printf)
31#include "pub_tool_mallocfree.h" // VG_(malloc), VG_(free)
32#include "pub_tool_threadstate.h" // VG_(get_running_tid)
33
34
35static
36void vc_reserve(VectorClock* const vc, const unsigned new_capacity);
37
38
39void vc_init(VectorClock* const vc,
40 const VCElem* const vcelem,
41 const unsigned size)
42{
43 tl_assert(vc);
44 vc->size = 0;
45 vc->capacity = 0;
46 vc->vc = 0;
47 vc_reserve(vc, size);
48 tl_assert(size == 0 || vc->vc != 0);
49 if (vcelem)
50 {
51 VG_(memcpy)(vc->vc, vcelem, size * sizeof(vcelem[0]));
52 vc->size = size;
53 }
54}
55
56void vc_cleanup(VectorClock* const vc)
57{
58 vc_reserve(vc, 0);
59}
60
bartc46c2322008-02-24 18:26:46 +000061/** Copy constructor -- initializes *new. */
sewardjaf44c822007-11-25 14:01:38 +000062void vc_copy(VectorClock* const new,
63 const VectorClock* const rhs)
64{
65 vc_init(new, rhs->vc, rhs->size);
66}
67
bartc46c2322008-02-24 18:26:46 +000068/** Assignment operator -- *lhs is already a valid vector clock. */
69void vc_assign(VectorClock* const lhs,
70 const VectorClock* const rhs)
71{
72 vc_cleanup(lhs);
73 vc_copy(lhs, rhs);
74}
75
sewardjaf44c822007-11-25 14:01:38 +000076void vc_increment(VectorClock* const vc, ThreadId const threadid)
77{
78 unsigned i;
79 for (i = 0; i < vc->size; i++)
80 {
81 if (vc->vc[i].threadid == threadid)
82 {
83 typeof(vc->vc[i].count) const oldcount = vc->vc[i].count;
84 vc->vc[i].count++;
85 // Check for integer overflow.
86 tl_assert(oldcount < vc->vc[i].count);
87 return;
88 }
89 }
90
91 // The specified thread ID does not yet exist in the vector clock
92 // -- insert it.
93 {
94 VCElem vcelem = { threadid, 1 };
95 VectorClock vc2;
96 vc_init(&vc2, &vcelem, 1);
97 vc_combine(vc, &vc2);
98 vc_cleanup(&vc2);
99 }
100}
101
102/**
103 * @return True if all thread id's that are present in vc1 also exist in
104 * vc2, and if additionally all corresponding counters in v2 are higher or
105 * equal.
106 */
107Bool vc_lte(const VectorClock* const vc1,
108 const VectorClock* const vc2)
109{
110 unsigned i;
111 unsigned j = 0;
112 for (i = 0; i < vc1->size; i++)
113 {
114 while (j < vc2->size && vc2->vc[j].threadid < vc1->vc[i].threadid)
115 {
116 j++;
117 }
118 if (j >= vc2->size || vc2->vc[j].threadid > vc1->vc[i].threadid)
119 return False;
120 tl_assert(j < vc2->size && vc2->vc[j].threadid == vc1->vc[i].threadid);
121 if (vc1->vc[i].count > vc2->vc[j].count)
122 return False;
123 }
124 return True;
125}
126
127/**
128 * @return True if vector clocks vc1 and vc2 are ordered, and false otherwise.
129 * Order is as imposed by thread synchronization actions ("happens before").
130 */
131Bool vc_ordered(const VectorClock* const vc1,
132 const VectorClock* const vc2)
133{
134 return vc_lte(vc1, vc2) || vc_lte(vc2, vc1);
135}
136
bart5bd9f2d2008-03-03 20:31:58 +0000137/** Compute elementwise minimum. */
sewardjaf44c822007-11-25 14:01:38 +0000138void vc_min(VectorClock* const result,
139 const VectorClock* const rhs)
140{
141 unsigned i;
142 unsigned j;
143 unsigned shared;
144 unsigned new_size;
145
146 tl_assert(result);
147 tl_assert(rhs);
148
bart5bd9f2d2008-03-03 20:31:58 +0000149 /* First count the number of shared thread ID's. */
sewardjaf44c822007-11-25 14:01:38 +0000150 j = 0;
151 shared = 0;
152 for (i = 0; i < result->size; i++)
153 {
154 while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
155 j++;
156 if (j >= rhs->size)
157 break;
158 if (result->vc[i].threadid == rhs->vc[j].threadid)
159 shared++;
160 }
161
162 vc_check(result);
163
164 new_size = result->size + rhs->size - shared;
165 if (new_size > result->capacity)
166 vc_reserve(result, new_size);
167
168 vc_check(result);
169
bart5bd9f2d2008-03-03 20:31:58 +0000170 /* Next, combine both vector clocks into one. */
sewardjaf44c822007-11-25 14:01:38 +0000171 i = 0;
172 for (j = 0; j < rhs->size; j++)
173 {
174 vc_check(result);
175
176 while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
bart5bd9f2d2008-03-03 20:31:58 +0000177 {
178 /* Thread ID is missing in second vector clock. Clear the count. */
179 result->vc[i].count = 0;
sewardjaf44c822007-11-25 14:01:38 +0000180 i++;
bart5bd9f2d2008-03-03 20:31:58 +0000181 }
sewardjaf44c822007-11-25 14:01:38 +0000182 if (i >= result->size)
183 {
184 result->size++;
185 result->vc[i] = rhs->vc[j];
186 vc_check(result);
187 }
188 else if (result->vc[i].threadid > rhs->vc[j].threadid)
189 {
bart5bd9f2d2008-03-03 20:31:58 +0000190 /* Thread ID is missing in first vector clock. Leave out. */
sewardjaf44c822007-11-25 14:01:38 +0000191 }
192 else
193 {
bart5bd9f2d2008-03-03 20:31:58 +0000194 /* The thread ID is present in both vector clocks. Compute the minimum */
195 /* of vc[i].count and vc[j].count. */
sewardjaf44c822007-11-25 14:01:38 +0000196 tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
197 if (rhs->vc[j].count < result->vc[i].count)
198 {
199 result->vc[i].count = rhs->vc[j].count;
200 }
201 vc_check(result);
202 }
203 }
204 vc_check(result);
sewardjaf44c822007-11-25 14:01:38 +0000205}
206
/**
 * Compute the elementwise maximum of the vector clocks *result and *rhs and
 * store it in *result (the "join" operation on vector clocks). Entries whose
 * thread ID occurs in only one of the two clocks are copied into the result
 * unchanged. Both clocks must satisfy the vc_check() invariant: entries
 * sorted by strictly increasing thread ID.
 */
void vc_combine(VectorClock* const result,
                const VectorClock* const rhs)
{
  unsigned i;
  unsigned j;
  unsigned shared;   // number of thread ID's present in both clocks
  unsigned new_size; // number of elements of *result after combining

  tl_assert(result);
  tl_assert(rhs);

  // First count the number of shared thread id's, so that the final size is
  // known up front and the merge below never has to reallocate.
  j = 0;
  shared = 0;
  for (i = 0; i < result->size; i++)
  {
    while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
      j++;
    if (j >= rhs->size)
      break;
    if (result->vc[i].threadid == rhs->vc[j].threadid)
      shared++;
  }

  vc_check(result);

  new_size = result->size + rhs->size - shared;
  if (new_size > result->capacity)
    vc_reserve(result, new_size);

  vc_check(result);

  // Next, combine both vector clocks into one.
  i = 0;
  for (j = 0; j < rhs->size; j++)
  {
    vc_check(result);

    // Skip entries of *result with a thread ID below rhs->vc[j].threadid;
    // these occur only in *result and are left unchanged.
    while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
      i++;
    if (i >= result->size)
    {
      // All remaining entries of *rhs have thread ID's larger than any in
      // *result -- append rhs->vc[j] at the end.
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else if (result->vc[i].threadid > rhs->vc[j].threadid)
    {
      // rhs->vc[j].threadid is missing from *result -- insert it at
      // position i, shifting the larger entries one slot up.
      unsigned k;
      for (k = result->size; k > i; k--)
      {
        result->vc[k] = result->vc[k - 1];
      }
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else
    {
      // Thread ID present in both clocks -- keep the larger count.
      tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
      if (rhs->vc[j].count > result->vc[i].count)
      {
        result->vc[i].count = rhs->vc[j].count;
      }
      vc_check(result);
    }
  }
  vc_check(result);
  tl_assert(result->size == new_size);
}
280
281void vc_print(const VectorClock* const vc)
282{
283 unsigned i;
284
285 tl_assert(vc);
286 VG_(printf)("[");
287 for (i = 0; i < vc->size; i++)
288 {
289 tl_assert(vc->vc);
290 VG_(printf)("%s %d: %d", i > 0 ? "," : "",
291 vc->vc[i].threadid, vc->vc[i].count);
292 }
293 VG_(printf)(" ]");
294}
295
296void vc_snprint(Char* const str, Int const size,
297 const VectorClock* const vc)
298{
299 unsigned i;
300
301 tl_assert(vc);
302 VG_(snprintf)(str, size, "[");
303 for (i = 0; i < vc->size; i++)
304 {
305 tl_assert(vc->vc);
306 VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str),
307 "%s %d: %d", i > 0 ? "," : "",
308 vc->vc[i].threadid, vc->vc[i].count);
309 }
310 VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str), " ]");
311}
312
313/**
314 * Invariant test.
315 */
316void vc_check(const VectorClock* const vc)
317{
318 unsigned i;
319 tl_assert(vc->size <= vc->capacity);
320 for (i = 1; i < vc->size; i++)
321 {
322 tl_assert(vc->vc[i-1].threadid < vc->vc[i].threadid);
323 }
324}
325
/**
 * Change the size of the memory block pointed at by vc->vc.
 * Changes capacity, but does not change size. If the size of the memory
 * block is increased, the newly allocated memory is not initialized.
 * Note: this function only ever grows the block -- a call with
 * new_capacity <= vc->capacity leaves vc->vc and vc->capacity untouched.
 */
static
void vc_reserve(VectorClock* const vc, const unsigned new_capacity)
{
  tl_assert(vc);
  if (new_capacity > vc->capacity)
  {
    if (vc->vc)
    {
      // NOTE(review): the result of VG_(realloc)() is not checked; this
      // relies on Valgrind's allocator aborting on out-of-memory -- confirm.
      vc->vc = VG_(realloc)(vc->vc, new_capacity * sizeof(vc->vc[0]));
    }
    else if (new_capacity > 0)
    {
      vc->vc = VG_(malloc)(new_capacity * sizeof(vc->vc[0]));
    }
    else
    {
      // Unreachable: new_capacity > vc->capacity implies new_capacity > 0.
      tl_assert(vc->vc == 0 && new_capacity == 0);
    }
    vc->capacity = new_capacity;
  }
  tl_assert(new_capacity == 0 || vc->vc != 0);
}
353
354/**
355 * Unit test.
356 */
357void vc_test(void)
358{
359 VectorClock vc1;
360 VCElem vc1elem[] = { { 3, 7 }, { 5, 8 }, };
361 VectorClock vc2;
362 VCElem vc2elem[] = { { 1, 4 }, { 3, 9 }, };
363 VectorClock vc3;
364 VCElem vc4elem[] = { { 1, 3 }, { 2, 1 }, };
365 VectorClock vc4;
366 VCElem vc5elem[] = { { 1, 4 }, };
367 VectorClock vc5;
368
369 vc_init(&vc1, vc1elem, sizeof(vc1elem)/sizeof(vc1elem[0]));
370 vc_init(&vc2, vc2elem, sizeof(vc2elem)/sizeof(vc2elem[0]));
371 vc_init(&vc3, 0, 0);
372 vc_init(&vc4, vc4elem, sizeof(vc4elem)/sizeof(vc4elem[0]));
373 vc_init(&vc5, vc5elem, sizeof(vc5elem)/sizeof(vc5elem[0]));
374
375 vc_combine(&vc3, &vc1);
376 vc_combine(&vc3, &vc2);
377
378 VG_(printf)("vc1: ");
379 vc_print(&vc1);
380 VG_(printf)("\nvc2: ");
381 vc_print(&vc2);
382 VG_(printf)("\nvc3: ");
383 vc_print(&vc3);
384 VG_(printf)("\n");
385 VG_(printf)("vc_lte(vc1, vc2) = %d, vc_lte(vc1, vc3) = %d, vc_lte(vc2, vc3) = %d, vc_lte(", vc_lte(&vc1, &vc2), vc_lte(&vc1, &vc3), vc_lte(&vc2, &vc3));
386 vc_print(&vc4);
387 VG_(printf)(", ");
388 vc_print(&vc5);
389 VG_(printf)(") = %d sw %d\n", vc_lte(&vc4, &vc5), vc_lte(&vc5, &vc4));
390
391 vc_cleanup(&vc1);
392 vc_cleanup(&vc2);
393 vc_cleanup(&vc3);
394}