sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | ---------------------------------------------------------------- |
| 3 | |
| 4 | Notice that the above BSD-style license applies to this one file |
| 5 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 6 | the terms of the GNU General Public License, version 2. See the |
| 7 | COPYING file in the source distribution for details. |
| 8 | |
| 9 | ---------------------------------------------------------------- |
| 10 | |
| 11 | This file is part of Helgrind, a Valgrind tool for detecting errors |
| 12 | in threaded programs. |
| 13 | |
njn | 9f20746 | 2009-03-10 22:02:09 +0000 | [diff] [blame] | 14 | Copyright (C) 2007-2009 OpenWorks LLP |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 15 | info@open-works.co.uk |
| 16 | |
| 17 | Redistribution and use in source and binary forms, with or without |
| 18 | modification, are permitted provided that the following conditions |
| 19 | are met: |
| 20 | |
| 21 | 1. Redistributions of source code must retain the above copyright |
| 22 | notice, this list of conditions and the following disclaimer. |
| 23 | |
| 24 | 2. The origin of this software must not be misrepresented; you must |
| 25 | not claim that you wrote the original software. If you use this |
| 26 | software in a product, an acknowledgment in the product |
| 27 | documentation would be appreciated but is not required. |
| 28 | |
| 29 | 3. Altered source versions must be plainly marked as such, and must |
| 30 | not be misrepresented as being the original software. |
| 31 | |
| 32 | 4. The name of the author may not be used to endorse or promote |
| 33 | products derived from this software without specific prior written |
| 34 | permission. |
| 35 | |
| 36 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
| 37 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 38 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 39 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| 40 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 41 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE |
| 42 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 43 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 44 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 45 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 46 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 47 | |
| 48 | ---------------------------------------------------------------- |
| 49 | |
| 50 | Notice that the above BSD-style license applies to this one file |
| 51 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 52 | the terms of the GNU General Public License, version 2. See the |
| 53 | COPYING file in the source distribution for details. |
| 54 | |
| 55 | ---------------------------------------------------------------- |
| 56 | */ |
| 57 | |
| 58 | #ifndef __HELGRIND_H |
| 59 | #define __HELGRIND_H |
| 60 | |
| 61 | #include "valgrind.h" |
| 62 | |
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* The only public request: see VALGRIND_HG_CLEAN_MEMORY below. */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      /* NB: the + 256 offset leaves a gap after the public request,
         presumably reserving space for future public requests --
         TODO confirm against the tool side. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      /* Trailing comments give the argument types each request carries. */
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,            /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,           /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED1,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED2,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED3,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED4,                  /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,      /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,        /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE  /* pth_bar_t*, ulong */

   } Vg_TCheckClientRequest;
| 120 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 121 | |
| 122 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 123 | /*--- ---*/ |
| 124 | /*--- Implementation-only facilities. Not for end-user use. ---*/ |
| 125 | /*--- For end-user facilities see below (the next section in ---*/ |
| 126 | /*--- this file.) ---*/ |
| 127 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 128 | /*----------------------------------------------------------------*/ |
| 129 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 130 | /* Do a client request. These are macros rather than a functions so |
| 131 | as to avoid having an extra frame in stack traces. |
| 132 | |
| 133 | NB: these duplicate definitions in hg_intercepts.c. But here, we |
| 134 | have to make do with weaker typing (no definition of Word etc) and |
| 135 | no assertions, whereas in helgrind.h we can use those facilities. |
| 136 | Obviously it's important the two sets of definitions are kept in |
| 137 | sync. |
| 138 | |
| 139 | The commented-out asserts should actually hold, but unfortunately |
| 140 | they can't be allowed to be visible here, because that would |
| 141 | require the end-user code to #include <assert.h>. |
| 142 | */ |
| 143 | |
/* Issue a client request _creqF carrying one word-sized argument;
   the result is discarded.  _ty1F is the nominal type of _arg1F and
   must be word-sized (the commented-out assert would check this, but
   asserts cannot be used here -- see the comment above). */
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _unused_res, _arg1;                       \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
   } while (0)
| 153 | |
/* As DO_CREQ_v_W, but carrying two word-sized arguments. */
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _unused_res, _arg1, _arg2;                \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1,_arg2,0,0,0);     \
   } while (0)
| 165 | |
/* As DO_CREQ_v_W, but carrying three word-sized arguments. */
#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _unused_res, _arg1, _arg2, _arg3;         \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST(_unused_res, 0,         \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
   } while (0)
| 180 | |
| 181 | |
/* Tell Helgrind that an unimplemented annotation named _qzz_str was
   used.  Per the comments further below, unimplemented ANNOTATE_*
   macros expand to this rather than silently doing nothing, and the
   tool raises an assertion in response. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 185 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 186 | |
| 187 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 188 | /*--- ---*/ |
| 189 | /*--- Helgrind-native requests. These allow access to ---*/ |
| 190 | /*--- the same set of annotation primitives that are used ---*/ |
| 191 | /*--- to build the POSIX pthread wrappers. ---*/ |
| 192 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 193 | /*----------------------------------------------------------------*/ |
| 194 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 195 | /* ---------------------------------------------------------- |
| 196 | For describing ordinary mutexes (non-rwlocks). For rwlock |
| 197 | descriptions see ANNOTATE_RWLOCK_* below. |
| 198 | ---------------------------------------------------------- */ |
| 199 | |
/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
| 231 | |
| 232 | /* ---------------------------------------------------------- |
| 233 | For describing semaphores. |
| 234 | ---------------------------------------------------------- */ |
| 235 | |
/* Notify here immediately after semaphore creation.  _value is the
   semaphore's initial count. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
| 257 | |
| 258 | /* ---------------------------------------------------------- |
| 259 | For describing barriers. |
| 260 | ---------------------------------------------------------- */ |
| 261 | |
/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,     \
                 void*,(_bar),                                 \
                 unsigned long,(_count),                       \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                     \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,       \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)        \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,    \
                void*,(_bar),                                  \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,    \
               void*,(_bar))
| 293 | |
| 294 | /* ---------------------------------------------------------- |
| 295 | For describing memory ownership changes. |
| 296 | ---------------------------------------------------------- */ |
| 297 | |
/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory.

   Note this is the one request in this file built on the public
   VG_USERREQ__HG_CLEAN_MEMORY id (no leading underscore). */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
| 309 | |
| 310 | /* ---------------------------------------------------------- |
| 311 | For error control. |
| 312 | ---------------------------------------------------------- */ |
| 313 | |
/* Tell H that an address range is not to be "tracked" until further
   notice.  This puts it in the NOACCESS state, in which case we
   ignore all reads and writes to it.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 335 | |
| 336 | |
| 337 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 338 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 339 | /*--- ThreadSanitizer-compatible requests ---*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 340 | /*--- (mostly unimplemented) ---*/ |
| 341 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 342 | /*----------------------------------------------------------------*/ |
| 343 | |
| 344 | /* A quite-broad set of annotations, as used in the ThreadSanitizer |
| 345 | project. This implementation aims to be a (source-level) |
| 346 | compatible implementation of the macros defined in: |
| 347 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 348 | http://code.google.com/p/data-race-test/source |
| 349 | /browse/trunk/dynamic_annotations/dynamic_annotations.h |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 350 | |
| 351 | (some of the comments below are taken from the above file) |
| 352 | |
| 353 | The implementation here is very incomplete, and intended as a |
| 354 | starting point. Many of the macros are unimplemented. Rather than |
| 355 | allowing unimplemented macros to silently do nothing, they cause an |
| 356 | assertion. Intention is to implement them on demand. |
| 357 | |
| 358 | The major use of these macros is to make visible to race detectors, |
| 359 | the behaviour (effects) of user-implemented synchronisation |
| 360 | primitives, that the detectors could not otherwise deduce from the |
| 361 | normal observation of pthread etc calls. |
| 362 | |
| 363 | Some of the macros are no-ops in Helgrind. That's because Helgrind |
| 364 | is a pure happens-before detector, whereas ThreadSanitizer uses a |
| 365 | hybrid lockset and happens-before scheme, which requires more |
| 366 | accurate annotations for correct operation. |
| 367 | |
| 368 | The macros are listed in the same order as in dynamic_annotations.h |
| 369 | (URL just above). |
| 370 | |
| 371 | I should point out that I am less than clear about the intended |
| 372 | semantics of quite a number of them. Comments and clarifications |
| 373 | welcomed! |
| 374 | */ |
| 375 | |
| 376 | /* ---------------------------------------------------------------- |
| 377 | These four allow description of user-level condition variables, |
| 378 | apparently in the style of POSIX's pthread_cond_t. Currently |
| 379 | unimplemented and will assert. |
| 380 | ---------------------------------------------------------------- |
| 381 | */ |
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind.
   Unimplemented: expands to _HG_CLIENTREQ_UNIMP, which asserts. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock.  Unimplemented. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV.  Unimplemented. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV.  Unimplemented. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
| 403 | |
| 404 | |
| 405 | /* ---------------------------------------------------------------- |
| 406 | Create completely arbitrary happens-before edges between threads. |
| 407 | If thread T1 does ANNOTATE_HAPPENS_BEFORE(obj) and later (w.r.t. |
| 408 | some notional global clock for the computation) thread T2 does |
| 409 | ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all memory |
| 410 | accesses done by T1 before the ..BEFORE.. call as happening-before |
| 411 | all memory accesses done by T2 after the ..AFTER.. call. Hence |
| 412 | Helgrind won't complain about races if T2's accesses afterwards are |
| 413 | to the same locations as T1's accesses before. |
| 414 | |
| 415 | OBJ is a machine word (unsigned long, or void*), is completely |
| 416 | arbitrary, and denotes the identity of some synchronisation object |
| 417 | you're modelling. |
| 418 | |
| 419 | You must do the _BEFORE call just before the real sync event on the |
| 420 | signaller's side, and _AFTER just after the real sync event on the |
| 421 | waiter's side. |
| 422 | |
| 423 | If none of the rest of these macros make sense to you, at least |
| 424 | take the time to understand these two. They form the very essence |
| 425 | of describing arbitrary inter-thread synchronisation events to |
| 426 | Helgrind. You can get a long way just with them alone. |
| 427 | ---------------------------------------------------------------- |
| 428 | */ |
/* Signaller side: everything this thread did before here
   happens-before a later ANNOTATE_HAPPENS_AFTER(obj).  See the long
   comment above for full semantics. */
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

/* Waiter side: pairs with an earlier ANNOTATE_HAPPENS_BEFORE(obj). */
#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 434 | |
| 435 | |
| 436 | /* ---------------------------------------------------------------- |
| 437 | Memory publishing. The TSan sources say: |
| 438 | |
| 439 | Report that the bytes in the range [pointer, pointer+size) are about |
| 440 | to be published safely. The race checker will create a happens-before |
| 441 | arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to |
| 442 | subsequent accesses to this memory. |
| 443 | |
| 444 | I'm not sure I understand what this means exactly, nor whether it |
| 445 | is relevant for a pure h-b detector. Leaving unimplemented for |
| 446 | now. |
| 447 | ---------------------------------------------------------------- |
| 448 | */ |
/* Unimplemented (see discussion above): expands to
   _HG_CLIENTREQ_UNIMP, which asserts. */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")
| 451 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 452 | /* DEPRECATED. Don't use it. */ |
| 453 | /* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */ |
| 454 | |
| 455 | /* DEPRECATED. Don't use it. */ |
| 456 | /* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */ |
| 457 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 458 | |
| 459 | /* ---------------------------------------------------------------- |
| 460 | TSan sources say: |
| 461 | |
| 462 | Instruct the tool to create a happens-before arc between |
| 463 | MU->Unlock() and MU->Lock(). This annotation may slow down the |
| 464 | race detector; normally it is used only when it would be |
| 465 | difficult to annotate each of the mutex's critical sections |
| 466 | individually using the annotations above. |
| 467 | |
| 468 | If MU is a posix pthread_mutex_t then Helgrind will do this anyway. |
| 469 | In any case, leave as unimp for now. I'm unsure about the intended |
| 470 | behaviour. |
| 471 | ---------------------------------------------------------------- |
| 472 | */ |
/* Unimplemented (see discussion above): expands to
   _HG_CLIENTREQ_UNIMP, which asserts. */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")
| 475 | |
| 476 | /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */ |
| 477 | /* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 478 | |
| 479 | |
| 480 | /* ---------------------------------------------------------------- |
| 481 | TSan sources say: |
| 482 | |
| 483 | Annotations useful when defining memory allocators, or when |
| 484 | memory that was protected in one way starts to be protected in |
| 485 | another. |
| 486 | |
| 487 | Report that a new memory at "address" of size "size" has been |
| 488 | allocated. This might be used when the memory has been retrieved |
 | 489 | from a free list and is about to be reused, or when the locking |
| 490 | discipline for a variable changes. |
| 491 | |
| 492 | AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY. |
| 493 | ---------------------------------------------------------------- |
| 494 | */ |
/* Implemented as a straight alias of VALGRIND_HG_CLEAN_MEMORY
   (see the rationale in the comment above). */
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
| 497 | |
| 498 | |
| 499 | /* ---------------------------------------------------------------- |
| 500 | TSan sources say: |
| 501 | |
| 502 | Annotations useful when defining FIFO queues that transfer data |
| 503 | between threads. |
| 504 | |
| 505 | All unimplemented. Am not claiming to understand this (yet). |
| 506 | ---------------------------------------------------------------- |
| 507 | */ |
| 508 | |
/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get).
   All four PCQ annotations are unimplemented and assert. */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
| 529 | |
| 530 | |
| 531 | /* ---------------------------------------------------------------- |
| 532 | Annotations that suppress errors. It is usually better to express |
| 533 | the program's synchronization using the other annotations, but |
| 534 | these can be used when all else fails. |
| 535 | |
| 536 | Currently these are all unimplemented. I can't think of a simple |
| 537 | way to implement them without at least some performance overhead. |
| 538 | ---------------------------------------------------------------- |
| 539 | */ |
| 540 | |
/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))".  "pointer" must be a non-void* pointer.  Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?

   Unimplemented: expands to _HG_CLIENTREQ_UNIMP, which asserts.
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size).  Also unimplemented. */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE_SIZED")
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 556 | |
| 557 | /* Request the analysis tool to ignore all reads in the current thread |
| 558 | until ANNOTATE_IGNORE_READS_END is called. Useful to ignore |
| 559 | intentional racey reads, while still checking other reads and all |
| 560 | writes. */ |
| 561 | #define ANNOTATE_IGNORE_READS_BEGIN() \ |
| 562 | _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN") |
| 563 | |
| 564 | /* Stop ignoring reads. */ |
| 565 | #define ANNOTATE_IGNORE_READS_END() \ |
| 566 | _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END") |
| 567 | |
| 568 | /* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */ |
| 569 | #define ANNOTATE_IGNORE_WRITES_BEGIN() \ |
| 570 | _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN") |
| 571 | |
| 572 | /* Stop ignoring writes. */ |
| 573 | #define ANNOTATE_IGNORE_WRITES_END() \ |
| 574 | _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END") |
| 575 | |
| 576 | /* Start ignoring all memory accesses (reads and writes). */ |
| 577 | #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ |
| 578 | do { \ |
| 579 | ANNOTATE_IGNORE_READS_BEGIN(); \ |
| 580 | ANNOTATE_IGNORE_WRITES_BEGIN(); \ |
| 581 | } while (0) |
| 582 | |
| 583 | /* Stop ignoring all memory accesses. */ |
| 584 | #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ |
| 585 | do { \ |
| 586 | ANNOTATE_IGNORE_WRITES_END(); \ |
| 587 | ANNOTATE_IGNORE_READS_END(); \ |
| 588 | } while (0) |
| 589 | |
| 590 | |
| 591 | /* ---------------------------------------------------------------- |
| 592 | Annotations useful for debugging. |
| 593 | |
   Again, so far unimplemented, partly for performance reasons.
| 595 | ---------------------------------------------------------------- |
| 596 | */ |
| 597 | |
/* Request to trace every access to ADDRESS.  Unimplemented in
   Helgrind: only reports the annotation as unimplemented; ADDRESS is
   ignored. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")
| 601 | |
/* Report the current thread name to a race detector.  Unimplemented
   in Helgrind: NAME is ignored. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
| 605 | |
| 606 | |
| 607 | /* ---------------------------------------------------------------- |
| 608 | Annotations for describing behaviour of user-implemented lock |
| 609 | primitives. In all cases, the LOCK argument is a completely |
| 610 | arbitrary machine word (unsigned long, or void*) and can be any |
| 611 | value which gives a unique identity to the lock objects being |
| 612 | modelled. |
| 613 | |
| 614 | We just pretend they're ordinary posix rwlocks. That'll probably |
| 615 | give some rather confusing wording in error messages, claiming that |
| 616 | the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact |
| 617 | they are not. Ah well. |
| 618 | ---------------------------------------------------------------- |
| 619 | */ |
/* Report that a lock has just been created at address LOCK.
   Modelled by Helgrind as pthread_rwlock_init() having completed on
   LOCK (see the section comment above re the rwlock pretence). */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 624 | |
/* Report that the lock at address LOCK is about to be destroyed.
   Modelled as pthread_rwlock_destroy() being entered on LOCK. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 629 | |
/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock.  Modelled as a
   pthread_rwlock lock operation completing on LOCK, with is_w passed
   through to distinguish write- from read-acquisition. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,    \
                void*,(lock), unsigned long,(is_w))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 635 | |
/* Report that the lock at address LOCK is about to be released.
   Modelled as pthread_rwlock_unlock() being entered on LOCK.  The
   unlock request carries no read/write flag, hence is_w is accepted
   for API compatibility but not passed on. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,    \
               void*,(lock)) /* is_w is ignored */
| 640 | |
| 641 | |
| 642 | /* ------------------------------------------------------------- |
| 643 | Annotations useful when implementing barriers. They are not |
| 644 | normally needed by modules that merely use barriers. |
| 645 | The "barrier" argument is a pointer to the barrier object. |
| 646 | ---------------------------------------------------------------- |
| 647 | */ |
| 648 | |
/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy().
   Unimplemented in Helgrind: all three arguments are ignored. */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
| 654 | |
/* Report that we are about to enter barrier_wait("barrier").
   Unimplemented in Helgrind: BARRIER is ignored and the annotation
   is merely reported as unimplemented.  (Fix: the request string
   previously said "ANNOTATE_BARRIER_DESTROY" — a copy-paste error
   that made the diagnostic name the wrong macro.) */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")
| 658 | |
/* Report that we just exited barrier_wait("barrier").
   Unimplemented in Helgrind: BARRIER is ignored and the annotation
   is merely reported as unimplemented.  (Fix: the request string
   previously said "ANNOTATE_BARRIER_DESTROY" — a copy-paste error
   that made the diagnostic name the wrong macro.) */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")
| 662 | |
/* Report that the "barrier" has been destroyed.  Unimplemented in
   Helgrind: BARRIER is ignored. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 666 | |
| 667 | |
| 668 | /* ---------------------------------------------------------------- |
| 669 | Annotations useful for testing race detectors. |
| 670 | ---------------------------------------------------------------- |
| 671 | */ |
| 672 | |
/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector.  Unimplemented in Helgrind:
   both arguments are ignored. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")
| 677 | |
/* A no-op.  Insert where you like to test the interceptors.
   Unimplemented in Helgrind: ARG is ignored. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
| 681 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 682 | /* Force the race detector to flush its state. The actual effect depends on |
| 683 | * the implementation of the detector. */ |
| 684 | #define ANNOTATE_FLUSH_STATE() \ |
| 685 | _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE") |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 686 | |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 687 | #endif /* __HELGRIND_H */ |