sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | ---------------------------------------------------------------- |
| 3 | |
| 4 | Notice that the above BSD-style license applies to this one file |
| 5 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 6 | the terms of the GNU General Public License, version 2. See the |
| 7 | COPYING file in the source distribution for details. |
| 8 | |
| 9 | ---------------------------------------------------------------- |
| 10 | |
| 11 | This file is part of Helgrind, a Valgrind tool for detecting errors |
| 12 | in threaded programs. |
| 13 | |
njn | 9f20746 | 2009-03-10 22:02:09 +0000 | [diff] [blame] | 14 | Copyright (C) 2007-2009 OpenWorks LLP |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 15 | info@open-works.co.uk |
| 16 | |
| 17 | Redistribution and use in source and binary forms, with or without |
| 18 | modification, are permitted provided that the following conditions |
| 19 | are met: |
| 20 | |
| 21 | 1. Redistributions of source code must retain the above copyright |
| 22 | notice, this list of conditions and the following disclaimer. |
| 23 | |
| 24 | 2. The origin of this software must not be misrepresented; you must |
| 25 | not claim that you wrote the original software. If you use this |
| 26 | software in a product, an acknowledgment in the product |
| 27 | documentation would be appreciated but is not required. |
| 28 | |
| 29 | 3. Altered source versions must be plainly marked as such, and must |
| 30 | not be misrepresented as being the original software. |
| 31 | |
| 32 | 4. The name of the author may not be used to endorse or promote |
| 33 | products derived from this software without specific prior written |
| 34 | permission. |
| 35 | |
| 36 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
| 37 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 38 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 39 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| 40 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 41 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE |
| 42 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 43 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 44 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 45 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 46 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 47 | |
| 48 | ---------------------------------------------------------------- |
| 49 | |
| 50 | Notice that the above BSD-style license applies to this one file |
| 51 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 52 | the terms of the GNU General Public License, version 2. See the |
| 53 | COPYING file in the source distribution for details. |
| 54 | |
| 55 | ---------------------------------------------------------------- |
| 56 | */ |
| 57 | |
| 58 | #ifndef __HELGRIND_H |
| 59 | #define __HELGRIND_H |
| 60 | |
| 61 | #include "valgrind.h" |
| 62 | |
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* The only request in this enum intended for direct end-user
         use; see VALGRIND_HG_CLEAN_MEMORY further down. */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,            /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,           /* arbitrary UWord SO-tag */
      /* Reserved slots: keep the ABI stable if entries are needed
         later without renumbering everything after them. */
      _VG_USERREQ__HG_RESERVED1,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED2                   /* Do not use */

   } Vg_TCheckClientRequest;
| 115 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame^] | 116 | |
| 117 | /*----------------------------------------------------------------*/ |
| 118 | /*--- An implementation-only request -- not for end user use ---*/ |
| 119 | /*----------------------------------------------------------------*/ |
| 120 | |
/* Tell Helgrind that an unimplemented annotation macro, named by the
   string _qzz_str, has been used.  Per the comments further down,
   unimplemented annotations deliberately cause an assertion rather
   than silently doing nothing.  Implementation detail only -- not for
   end-user use. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                            \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                    \
                                 _VG_USERREQ__HG_CLIENTREQ_UNIMP, \
                                 _qzz_str, 0, 0, 0, 0);          \
      /* Reference the result so compilers do not warn about a    \
         set-but-unused variable; the request's return value is   \
         deliberately ignored. */                                 \
      (void)_qzz_res;                                             \
   } while(0)
| 129 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame^] | 130 | |
| 131 | /*----------------------------------------------------------------*/ |
| 132 | /*--- Misc requests ---*/ |
| 133 | /*----------------------------------------------------------------*/ |
| 134 | |
| 135 | /* Clean memory state. This makes Helgrind forget everything it knew |
| 136 | about the specified memory range. Effectively this announces that |
| 137 | the specified memory range now "belongs" to the calling thread, so |
| 138 | that: (1) the calling thread can access it safely without |
| 139 | synchronisation, and (2) all other threads must sync with this one |
| 140 | to access it safely. This is particularly useful for memory |
| 141 | allocators that wish to recycle memory. */ |
/* Issue the CLEAN_MEMORY request for [_qzz_start, _qzz_start+_qzz_len)
   (full semantics described in the comment just above).  The request's
   result is not meaningful, so it is explicitly discarded. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)           \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(                                \
         (_qzz_res), 0, VG_USERREQ__HG_CLEAN_MEMORY,             \
         (_qzz_start), (_qzz_len), 0, 0, 0                       \
      );                                                         \
      /* silence -Wunused-but-set-variable; (void)0 did not */   \
      (void)_qzz_res;                                            \
   } while(0)
| 151 | |
| 152 | |
| 153 | /*----------------------------------------------------------------*/ |
| 154 | /*--- ThreadSanitizer-compatible requests ---*/ |
| 155 | /*----------------------------------------------------------------*/ |
| 156 | |
| 157 | /* A quite-broad set of annotations, as used in the ThreadSanitizer |
| 158 | project. This implementation aims to be a (source-level) |
| 159 | compatible implementation of the macros defined in: |
| 160 | |
| 161 | http://code.google.com/p/google-perftools/source \ |
| 162 | /browse/trunk/src/base/dynamic_annotations.h |
| 163 | |
| 164 | (some of the comments below are taken from the above file) |
| 165 | |
| 166 | The implementation here is very incomplete, and intended as a |
| 167 | starting point. Many of the macros are unimplemented. Rather than |
| 168 | allowing unimplemented macros to silently do nothing, they cause an |
| 169 | assertion. Intention is to implement them on demand. |
| 170 | |
| 171 | The major use of these macros is to make visible to race detectors, |
| 172 | the behaviour (effects) of user-implemented synchronisation |
| 173 | primitives, that the detectors could not otherwise deduce from the |
| 174 | normal observation of pthread etc calls. |
| 175 | |
| 176 | Some of the macros are no-ops in Helgrind. That's because Helgrind |
| 177 | is a pure happens-before detector, whereas ThreadSanitizer uses a |
| 178 | hybrid lockset and happens-before scheme, which requires more |
| 179 | accurate annotations for correct operation. |
| 180 | |
| 181 | The macros are listed in the same order as in dynamic_annotations.h |
| 182 | (URL just above). |
| 183 | |
| 184 | I should point out that I am less than clear about the intended |
| 185 | semantics of quite a number of them. Comments and clarifications |
| 186 | welcomed! |
| 187 | */ |
| 188 | |
| 189 | /* ---------------------------------------------------------------- |
| 190 | These four allow description of user-level condition variables, |
| 191 | apparently in the style of POSIX's pthread_cond_t. Currently |
| 192 | unimplemented and will assert. |
| 193 | ---------------------------------------------------------------- |
| 194 | */ |
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind.
   Unimplemented -- expands to a request that asserts if used. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock.  Unimplemented; asserts if used. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV.  Unimplemented; asserts if used. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV.  Unimplemented; asserts if used. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
| 216 | |
| 217 | |
| 218 | /* ---------------------------------------------------------------- |
| 219 | Create completely arbitrary happens-before edges between threads. |
| 220 | If thread T1 does ANNOTATE_HAPPENS_BEFORE(obj) and later (w.r.t. |
| 221 | some notional global clock for the computation) thread T2 does |
| 222 | ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all memory |
| 223 | accesses done by T1 before the ..BEFORE.. call as happening-before |
| 224 | all memory accesses done by T2 after the ..AFTER.. call. Hence |
| 225 | Helgrind won't complain about races if T2's accesses afterwards are |
| 226 | to the same locations as T1's accesses before. |
| 227 | |
| 228 | OBJ is a machine word (unsigned long, or void*), is completely |
| 229 | arbitrary, and denotes the identity of some synchronisation object |
| 230 | you're modelling. |
| 231 | |
| 232 | You must do the _BEFORE call just before the real sync event on the |
| 233 | signaller's side, and _AFTER just after the real sync event on the |
| 234 | waiter's side. |
| 235 | |
| 236 | If none of the rest of these macros make sense to you, at least |
| 237 | take the time to understand these two. They form the very essence |
| 238 | of describing arbitrary inter-thread synchronisation events to |
| 239 | Helgrind. You can get a long way just with them alone. |
| 240 | ---------------------------------------------------------------- |
| 241 | */ |
/* Signaller side: announce a sync event on OBJ.  Full semantics are
   described in the large comment above.  The request result is
   unused, so discard it explicitly to avoid set-but-unused-variable
   warnings (the original '(void)0;' did not reference _qzz_res). */
#define ANNOTATE_HAPPENS_BEFORE(obj)                             \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                    \
                                 _VG_USERREQ__HG_USERSO_SEND_PRE, \
                                 obj, 0, 0, 0, 0);               \
      (void)_qzz_res;                                            \
   } while (0)

/* Waiter side: complete a sync event on OBJ (pairs with a prior
   ANNOTATE_HAPPENS_BEFORE on the same OBJ value). */
#define ANNOTATE_HAPPENS_AFTER(obj)                              \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                    \
                                 _VG_USERREQ__HG_USERSO_RECV_POST, \
                                 obj, 0, 0, 0, 0);               \
      (void)_qzz_res;                                            \
   } while (0)
| 259 | |
| 260 | |
| 261 | /* ---------------------------------------------------------------- |
| 262 | Memory publishing. The TSan sources say: |
| 263 | |
| 264 | Report that the bytes in the range [pointer, pointer+size) are about |
| 265 | to be published safely. The race checker will create a happens-before |
| 266 | arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to |
| 267 | subsequent accesses to this memory. |
| 268 | |
| 269 | I'm not sure I understand what this means exactly, nor whether it |
| 270 | is relevant for a pure h-b detector. Leaving unimplemented for |
| 271 | now. |
| 272 | ---------------------------------------------------------------- |
| 273 | */ |
/* Unimplemented -- expands to a request that asserts if used. */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")
| 276 | |
| 277 | |
| 278 | /* ---------------------------------------------------------------- |
| 279 | TSan sources say: |
| 280 | |
| 281 | Instruct the tool to create a happens-before arc between |
| 282 | MU->Unlock() and MU->Lock(). This annotation may slow down the |
| 283 | race detector; normally it is used only when it would be |
| 284 | difficult to annotate each of the mutex's critical sections |
| 285 | individually using the annotations above. |
| 286 | |
| 287 | If MU is a posix pthread_mutex_t then Helgrind will do this anyway. |
| 288 | In any case, leave as unimp for now. I'm unsure about the intended |
| 289 | behaviour. |
| 290 | ---------------------------------------------------------------- |
| 291 | */ |
/* Unimplemented -- expands to a request that asserts if used. */
#define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_MUTEX_IS_USED_AS_CONDVAR")
| 294 | |
| 295 | |
| 296 | /* ---------------------------------------------------------------- |
| 297 | TSan sources say: |
| 298 | |
| 299 | Annotations useful when defining memory allocators, or when |
| 300 | memory that was protected in one way starts to be protected in |
| 301 | another. |
| 302 | |
| 303 | Report that a new memory at "address" of size "size" has been |
| 304 | allocated. This might be used when the memory has been retrieved |
 | 305 | from a free list and is about to be reused, or when the locking |
| 306 | discipline for a variable changes. |
| 307 | |
| 308 | AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY. |
| 309 | ---------------------------------------------------------------- |
| 310 | */ |
/* Implemented: simply an alias for VALGRIND_HG_CLEAN_MEMORY (see the
   rationale in the comment just above). */
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
| 313 | |
| 314 | |
| 315 | /* ---------------------------------------------------------------- |
| 316 | TSan sources say: |
| 317 | |
| 318 | Annotations useful when defining FIFO queues that transfer data |
| 319 | between threads. |
| 320 | |
| 321 | All unimplemented. Am not claiming to understand this (yet). |
| 322 | ---------------------------------------------------------------- |
| 323 | */ |
| 324 | |
/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get).
   All four PCQ annotations are unimplemented and assert if used. */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
| 345 | |
| 346 | |
| 347 | /* ---------------------------------------------------------------- |
| 348 | Annotations that suppress errors. It is usually better to express |
| 349 | the program's synchronization using the other annotations, but |
| 350 | these can be used when all else fails. |
| 351 | |
| 352 | Currently these are all unimplemented. I can't think of a simple |
| 353 | way to implement them without at least some performance overhead. |
| 354 | ---------------------------------------------------------------- |
| 355 | */ |
| 356 | |
/* Report that we may have a benign race on ADDRESS.  Insert at the
   point where ADDRESS has been allocated, preferably close to the
   point where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.
   Unimplemented; asserts if used.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")


/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes.  Unimplemented; asserts if used. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads.  Unimplemented; asserts if used. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes.  Unimplemented; asserts if used. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes).
   Composed from the two BEGIN macros above, both of which are
   currently unimplemented. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)
| 400 | |
| 401 | |
| 402 | /* ---------------------------------------------------------------- |
| 403 | Annotations useful for debugging. |
| 404 | |
 | 405 | Again, so far unimplemented, partly for performance reasons. |
| 406 | ---------------------------------------------------------------- |
| 407 | */ |
| 408 | |
/* Request to trace every access to ADDRESS.
   Unimplemented; asserts if used. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector.
   Unimplemented; asserts if used. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
| 416 | |
| 417 | |
| 418 | /* ---------------------------------------------------------------- |
| 419 | Annotations for describing behaviour of user-implemented lock |
| 420 | primitives. In all cases, the LOCK argument is a completely |
| 421 | arbitrary machine word (unsigned long, or void*) and can be any |
| 422 | value which gives a unique identity to the lock objects being |
| 423 | modelled. |
| 424 | |
| 425 | We just pretend they're ordinary posix rwlocks. That'll probably |
| 426 | give some rather confusing wording in error messages, claiming that |
| 427 | the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact |
| 428 | they are not. Ah well. |
| 429 | ---------------------------------------------------------------- |
| 430 | */ |
/* Report that a lock has just been created at address LOCK.  As
   described above, LOCK is an arbitrary machine word identifying a
   user-implemented lock; Helgrind models it as a pthread rwlock.
   In all four macros below the request result is unused, so it is
   explicitly discarded -- the original '(void)0;' left _qzz_res
   set-but-unused, provoking compiler warnings. */
#define ANNOTATE_RWLOCK_CREATE(lock)                             \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(                                \
         _qzz_res, 0, _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,  \
         lock, 0, 0, 0, 0                                        \
      );                                                         \
      (void)_qzz_res;                                            \
   } while(0)

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                            \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(                                \
         _qzz_res, 0, _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \
         lock, 0, 0, 0, 0                                        \
      );                                                         \
      (void)_qzz_res;                                            \
   } while(0)

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                     \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(                                \
         _qzz_res, 0, _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,  \
         lock, is_w ? 1 : 0, 0, 0, 0                             \
      );                                                         \
      (void)_qzz_res;                                            \
   } while(0)

/* Report that the lock at address LOCK is about to be released.
   is_w is accepted for TSan source compatibility but is not passed
   to Helgrind (the UNLOCK_PRE request takes only the lock). */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                     \
   do {                                                          \
      unsigned long _qzz_res;                                    \
      VALGRIND_DO_CLIENT_REQUEST(                                \
         _qzz_res, 0, _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE, \
         lock, 0, 0, 0, 0                                        \
      );                                                         \
      (void)_qzz_res;                                            \
   } while(0)
| 475 | |
| 476 | |
| 477 | /* ---------------------------------------------------------------- |
| 478 | Annotations useful for testing race detectors. |
| 479 | ---------------------------------------------------------------- |
| 480 | */ |
| 481 | |
/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector.  Unimplemented; asserts if
   used. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert where you like to test the interceptors.
   NOTE(review): despite the name this also expands to the
   'unimplemented' request and so asserts if used. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
| 490 | |
| 491 | |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 492 | #endif /* __HELGRIND_H */ |