/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

#include "drd_vc.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(memset), VG_(memmove)
#include "pub_tool_libcprint.h"   // VG_(printf)
#include "pub_tool_mallocfree.h"  // VG_(malloc), VG_(free)
#include "pub_tool_threadstate.h" // VG_(get_running_tid)


static
void vc_reserve(VectorClock* const vc, const unsigned new_capacity);

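/**
 * Initialize *vc. If vcelem is non-null, copy the first size elements of
 * vcelem into *vc; otherwise *vc starts out empty.
 */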
void vc_init(VectorClock* const vc,
             const VCElem* const vcelem,
             const unsigned size)
{
  tl_assert(vc);
  vc->size = 0;
  vc->capacity = 0;
  vc->vc = 0;
  vc_reserve(vc, size);
  tl_assert(size == 0 || vc->vc != 0);
  if (vcelem)
  {
    VG_(memcpy)(vc->vc, vcelem, size * sizeof(vcelem[0]));
    vc->size = size;
  }
}

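/** Reclaim the memory allocated for *vc. */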
void vc_cleanup(VectorClock* const vc)
{
  vc_reserve(vc, 0);
}

/** Copy constructor -- initializes *new. */
void vc_copy(VectorClock* const new,
             const VectorClock* const rhs)
{
  vc_init(new, rhs->vc, rhs->size);
}

/** Assignment operator -- *lhs is already a valid vector clock. */
void vc_assign(VectorClock* const lhs,
               const VectorClock* const rhs)
{
  vc_cleanup(lhs);
  vc_copy(lhs, rhs);
}

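/**
 * Increment the counter stored for thread threadid in vector clock *vc, or
 * insert a new element { threadid, 1 } if the thread ID is not yet present.
 */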
void vc_increment(VectorClock* const vc, ThreadId const threadid)
{
  unsigned i;
  for (i = 0; i < vc->size; i++)
  {
    if (vc->vc[i].threadid == threadid)
    {
      typeof(vc->vc[i].count) const oldcount = vc->vc[i].count;
      vc->vc[i].count++;
      // Check for integer overflow.
      tl_assert(oldcount < vc->vc[i].count);
      return;
    }
  }

  // The specified thread ID does not yet exist in the vector clock
  // -- insert it.
  {
    VCElem vcelem = { threadid, 1 };
    VectorClock vc2;
    vc_init(&vc2, &vcelem, 1);
    vc_combine(vc, &vc2);
    vc_cleanup(&vc2);
  }
}

/**
 * @return True if all thread IDs that are present in vc1 also exist in
 * vc2, and if additionally all corresponding counters in vc2 are greater
 * than or equal to those in vc1.
 */
Bool vc_lte(const VectorClock* const vc1,
            const VectorClock* const vc2)
{
  unsigned i;
  unsigned j = 0;
  for (i = 0; i < vc1->size; i++)
  {
    while (j < vc2->size && vc2->vc[j].threadid < vc1->vc[i].threadid)
    {
      j++;
    }
    if (j >= vc2->size || vc2->vc[j].threadid > vc1->vc[i].threadid)
      return False;
    tl_assert(j < vc2->size && vc2->vc[j].threadid == vc1->vc[i].threadid);
    if (vc1->vc[i].count > vc2->vc[j].count)
      return False;
  }
  return True;
}

/**
 * @return True if vector clocks vc1 and vc2 are ordered, and False otherwise.
 * Order is as imposed by thread synchronization actions ("happens before").
 */
Bool vc_ordered(const VectorClock* const vc1,
                const VectorClock* const vc2)
{
  return vc_lte(vc1, vc2) || vc_lte(vc2, vc1);
}

/**
 * Compute the element-wise minimum of *result and *rhs and store it in
 * *result. Entries that are present in only one of the two vector clocks
 * are copied unchanged.
 */
void vc_min(VectorClock* const result,
            const VectorClock* const rhs)
{
  unsigned i;
  unsigned j;
  unsigned shared;
  unsigned new_size;

  tl_assert(result);
  tl_assert(rhs);

  // First count the number of shared thread IDs.
  j = 0;
  shared = 0;
  for (i = 0; i < result->size; i++)
  {
    while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
      j++;
    if (j >= rhs->size)
      break;
    if (result->vc[i].threadid == rhs->vc[j].threadid)
      shared++;
  }

  vc_check(result);

  new_size = result->size + rhs->size - shared;
  if (new_size > result->capacity)
    vc_reserve(result, new_size);

  vc_check(result);

  // Next, merge both vector clocks into one, taking the minimum of the
  // counters of shared thread IDs.
  i = 0;
  for (j = 0; j < rhs->size; j++)
  {
    vc_check(result);

    while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
      i++;
    if (i >= result->size)
    {
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else if (result->vc[i].threadid > rhs->vc[j].threadid)
    {
      unsigned k;
      for (k = result->size; k > i; k--)
      {
        result->vc[k] = result->vc[k - 1];
      }
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else
    {
      tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
      if (rhs->vc[j].count < result->vc[i].count)
      {
        result->vc[i].count = rhs->vc[j].count;
      }
      vc_check(result);
    }
  }
  vc_check(result);
  tl_assert(result->size == new_size);
}

/**
 * Compute the element-wise maximum of *result and *rhs and store it in
 * *result, e.g. combining [ 1: 4, 3: 9 ] and [ 3: 7, 5: 8 ] yields
 * [ 1: 4, 3: 9, 5: 8 ].
 */
void vc_combine(VectorClock* const result,
                const VectorClock* const rhs)
{
  unsigned i;
  unsigned j;
  unsigned shared;
  unsigned new_size;

  tl_assert(result);
  tl_assert(rhs);

  // First count the number of shared thread IDs.
  j = 0;
  shared = 0;
  for (i = 0; i < result->size; i++)
  {
    while (j < rhs->size && rhs->vc[j].threadid < result->vc[i].threadid)
      j++;
    if (j >= rhs->size)
      break;
    if (result->vc[i].threadid == rhs->vc[j].threadid)
      shared++;
  }

  vc_check(result);

  new_size = result->size + rhs->size - shared;
  if (new_size > result->capacity)
    vc_reserve(result, new_size);

  vc_check(result);

  // Next, combine both vector clocks into one.
  i = 0;
  for (j = 0; j < rhs->size; j++)
  {
    vc_check(result);

    while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
      i++;
    if (i >= result->size)
    {
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else if (result->vc[i].threadid > rhs->vc[j].threadid)
    {
      unsigned k;
      for (k = result->size; k > i; k--)
      {
        result->vc[k] = result->vc[k - 1];
      }
      result->size++;
      result->vc[i] = rhs->vc[j];
      vc_check(result);
    }
    else
    {
      tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
      if (rhs->vc[j].count > result->vc[i].count)
      {
        result->vc[i].count = rhs->vc[j].count;
      }
      vc_check(result);
    }
  }
  vc_check(result);
  tl_assert(result->size == new_size);
}

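/** Print the contents of vector clock *vc. */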
void vc_print(const VectorClock* const vc)
{
  unsigned i;

  tl_assert(vc);
  VG_(printf)("[");
  for (i = 0; i < vc->size; i++)
  {
    tl_assert(vc->vc);
    VG_(printf)("%s %d: %d", i > 0 ? "," : "",
                vc->vc[i].threadid, vc->vc[i].count);
  }
  VG_(printf)(" ]");
}

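/**
 * Print the contents of vector clock *vc into the character array str,
 * which can hold at most size characters.
 */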
void vc_snprint(Char* const str, Int const size,
                const VectorClock* const vc)
{
  unsigned i;

  tl_assert(vc);
  VG_(snprintf)(str, size, "[");
  for (i = 0; i < vc->size; i++)
  {
    tl_assert(vc->vc);
    VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str),
                  "%s %d: %d", i > 0 ? "," : "",
                  vc->vc[i].threadid, vc->vc[i].count);
  }
  VG_(snprintf)(str + VG_(strlen)(str), size - VG_(strlen)(str), " ]");
}

/**
 * Invariant test.
 */
void vc_check(const VectorClock* const vc)
{
  unsigned i;
  tl_assert(vc->size <= vc->capacity);
  for (i = 1; i < vc->size; i++)
  {
    tl_assert(vc->vc[i-1].threadid < vc->vc[i].threadid);
  }
}

/**
 * Change the size of the memory block pointed at by vc->vc.
 * Changes capacity, but does not change size. If the size of the memory
 * block is increased, the newly allocated memory is not initialized.
 * If new_capacity is zero, the memory block is freed.
 */
static
void vc_reserve(VectorClock* const vc, const unsigned new_capacity)
{
  tl_assert(vc);
  if (new_capacity > vc->capacity)
  {
    if (vc->vc)
    {
      vc->vc = VG_(realloc)(vc->vc, new_capacity * sizeof(vc->vc[0]));
    }
    else if (new_capacity > 0)
    {
      vc->vc = VG_(malloc)(new_capacity * sizeof(vc->vc[0]));
    }
    else
    {
      tl_assert(vc->vc == 0 && new_capacity == 0);
    }
    vc->capacity = new_capacity;
  }
  else if (new_capacity == 0 && vc->vc)
  {
    // Release the memory block so that vc_cleanup() actually frees it.
    VG_(free)(vc->vc);
    vc->vc = 0;
    vc->capacity = 0;
  }
  tl_assert(new_capacity == 0 || vc->vc != 0);
}

/**
 * Unit test.
 */
void vc_test(void)
{
  VectorClock vc1;
  VCElem vc1elem[] = { { 3, 7 }, { 5, 8 }, };
  VectorClock vc2;
  VCElem vc2elem[] = { { 1, 4 }, { 3, 9 }, };
  VectorClock vc3;
  VCElem vc4elem[] = { { 1, 3 }, { 2, 1 }, };
  VectorClock vc4;
  VCElem vc5elem[] = { { 1, 4 }, };
  VectorClock vc5;

  vc_init(&vc1, vc1elem, sizeof(vc1elem)/sizeof(vc1elem[0]));
  vc_init(&vc2, vc2elem, sizeof(vc2elem)/sizeof(vc2elem[0]));
  vc_init(&vc3, 0, 0);
  vc_init(&vc4, vc4elem, sizeof(vc4elem)/sizeof(vc4elem[0]));
  vc_init(&vc5, vc5elem, sizeof(vc5elem)/sizeof(vc5elem[0]));

  vc_combine(&vc3, &vc1);
  vc_combine(&vc3, &vc2);

  VG_(printf)("vc1: ");
  vc_print(&vc1);
  VG_(printf)("\nvc2: ");
  vc_print(&vc2);
  VG_(printf)("\nvc3: ");
  vc_print(&vc3);
  VG_(printf)("\n");
  VG_(printf)("vc_lte(vc1, vc2) = %d, vc_lte(vc1, vc3) = %d, vc_lte(vc2, vc3) = %d, vc_lte(",
              vc_lte(&vc1, &vc2), vc_lte(&vc1, &vc3), vc_lte(&vc2, &vc3));
  vc_print(&vc4);
  VG_(printf)(", ");
  vc_print(&vc5);
  VG_(printf)(") = %d sw %d\n", vc_lte(&vc4, &vc5), vc_lte(&vc5, &vc4));

  vc_cleanup(&vc1);
  vc_cleanup(&vc2);
  vc_cleanup(&vc3);
  vc_cleanup(&vc4);
  vc_cleanup(&vc5);
}