sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | ---------------------------------------------------------------- |
| 3 | |
| 4 | Notice that the above BSD-style license applies to this one file |
| 5 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 6 | the terms of the GNU General Public License, version 2. See the |
| 7 | COPYING file in the source distribution for details. |
| 8 | |
| 9 | ---------------------------------------------------------------- |
| 10 | |
| 11 | This file is part of Helgrind, a Valgrind tool for detecting errors |
| 12 | in threaded programs. |
| 13 | |
sewardj | ec062e8 | 2011-10-23 07:32:08 +0000 | [diff] [blame] | 14 | Copyright (C) 2007-2011 OpenWorks LLP |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 15 | info@open-works.co.uk |
| 16 | |
| 17 | Redistribution and use in source and binary forms, with or without |
| 18 | modification, are permitted provided that the following conditions |
| 19 | are met: |
| 20 | |
| 21 | 1. Redistributions of source code must retain the above copyright |
| 22 | notice, this list of conditions and the following disclaimer. |
| 23 | |
| 24 | 2. The origin of this software must not be misrepresented; you must |
| 25 | not claim that you wrote the original software. If you use this |
| 26 | software in a product, an acknowledgment in the product |
| 27 | documentation would be appreciated but is not required. |
| 28 | |
| 29 | 3. Altered source versions must be plainly marked as such, and must |
| 30 | not be misrepresented as being the original software. |
| 31 | |
| 32 | 4. The name of the author may not be used to endorse or promote |
| 33 | products derived from this software without specific prior written |
| 34 | permission. |
| 35 | |
| 36 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
| 37 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 38 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 39 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| 40 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 41 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE |
| 42 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 43 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 44 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 45 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 46 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 47 | |
| 48 | ---------------------------------------------------------------- |
| 49 | |
| 50 | Notice that the above BSD-style license applies to this one file |
| 51 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 52 | the terms of the GNU General Public License, version 2. See the |
| 53 | COPYING file in the source distribution for details. |
| 54 | |
| 55 | ---------------------------------------------------------------- |
| 56 | */ |
| 57 | |
| 58 | #ifndef __HELGRIND_H |
| 59 | #define __HELGRIND_H |
| 60 | |
| 61 | #include "valgrind.h" |
| 62 | |
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      /* The only public request: forget all state Helgrind holds
         about a memory range (see VALGRIND_HG_CLEAN_MEMORY below). */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      /* Internal requests start at tool-base + 256 so that new public
         requests can be added before them without renumbering. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,            /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,           /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,          /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED3,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED4,                  /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,      /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,        /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK      /* Addr start_of_block */

   } Vg_TCheckClientRequest;
| 121 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 122 | |
| 123 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 124 | /*--- ---*/ |
| 125 | /*--- Implementation-only facilities. Not for end-user use. ---*/ |
| 126 | /*--- For end-user facilities see below (the next section in ---*/ |
| 127 | /*--- this file.) ---*/ |
| 128 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 129 | /*----------------------------------------------------------------*/ |
| 130 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 131 | /* Do a client request. These are macros rather than a functions so |
| 132 | as to avoid having an extra frame in stack traces. |
| 133 | |
   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those facilities.
| 137 | Obviously it's important the two sets of definitions are kept in |
| 138 | sync. |
| 139 | |
| 140 | The commented-out asserts should actually hold, but unfortunately |
| 141 | they can't be allowed to be visible here, because that would |
| 142 | require the end-user code to #include <assert.h>. |
| 143 | */ |
| 144 | |
/* Issue a client request with one argument and no result.  The
   argument is funnelled through a long int temporary; the
   commented-out assert (see note above) would check that _ty1F
   really is word-sized. */
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1, 0,0,0,0);                        \
   } while (0)
| 154 | |
/* Issue a client request with one argument, yielding a word-sized
   result in _resF.  _dfltF is the value produced when the program
   is not running under Valgrind.

   Fix: the argument temporary was declared as 'arg1' but assigned
   and passed as '_arg1', and '_qzz_res' was used without ever being
   declared, so any expansion of this macro failed to compile.  Both
   temporaries are now declared correctly. */
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                 (_dfltF),                               \
                 (_creqF),                               \
                 _arg1, 0,0,0,0);                        \
      _resF = _qzz_res;                                  \
   } while (0)
| 166 | |
/* Issue a client request with two arguments and no result.  Both
   arguments are funnelled through long int temporaries, which are
   assumed (but not checked -- see note above) to be word-sized. */
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1,_arg2,0,0,0);                     \
   } while (0)
| 178 | |
/* Issue a client request with three arguments and no result.  As
   with the one- and two-argument forms, the arguments are passed
   via word-sized long int temporaries. */
#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                 (_creqF),                               \
                 _arg1,_arg2,_arg3,0,0);                 \
   } while (0)
| 193 | |
| 194 | |
/* Report use of an unimplemented annotation macro; _qzz_str names
   the macro.  Rather than silently doing nothing, Helgrind turns
   this into a complaint (see the "ThreadSanitizer-compatible
   requests" section below). */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 198 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 199 | |
| 200 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 201 | /*--- ---*/ |
| 202 | /*--- Helgrind-native requests. These allow access to ---*/ |
| 203 | /*--- the same set of annotation primitives that are used ---*/ |
| 204 | /*--- to build the POSIX pthread wrappers. ---*/ |
| 205 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 206 | /*----------------------------------------------------------------*/ |
| 207 | |
/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.  Each macro simply
   forwards its (opaque) mutex address to the matching Helgrind
   client request.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
| 244 | |
/* ----------------------------------------------------------
   For describing semaphores.  The _sem address is opaque to
   Helgrind; it only needs to match across the INIT/WAIT/POST/
   DESTROY calls for the same semaphore.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation.  _value is the
   initial count. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
| 270 | |
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,     \
                 void*,(_bar),                                 \
                 unsigned long,(_count),                       \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                     \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,       \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is
   no change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think
   of any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)        \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,    \
                void*,(_bar),                                  \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,    \
               void*,(_bar))
| 306 | |
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
| 322 | |
/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found), and the value -1 indicates block not
   found, and -2 is returned when not running on Helgrind.
   NB: implemented with a GNU statement expression (__extension__
   ({...})) so the macro can yield a value; requires GCC/Clang. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                 void*,(_qzz_blockstart));                   \
     _npainted;                                              \
   }))
| 339 | |
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell H that an address range is not to be "tracked" until further
   notice.  This puts it in the NOACCESS state, in which case we
   ignore all reads and writes to it.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 365 | |
| 366 | |
| 367 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 368 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 369 | /*--- ThreadSanitizer-compatible requests ---*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 370 | /*--- (mostly unimplemented) ---*/ |
| 371 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 372 | /*----------------------------------------------------------------*/ |
| 373 | |
| 374 | /* A quite-broad set of annotations, as used in the ThreadSanitizer |
| 375 | project. This implementation aims to be a (source-level) |
| 376 | compatible implementation of the macros defined in: |
| 377 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 378 | http://code.google.com/p/data-race-test/source |
| 379 | /browse/trunk/dynamic_annotations/dynamic_annotations.h |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 380 | |
| 381 | (some of the comments below are taken from the above file) |
| 382 | |
| 383 | The implementation here is very incomplete, and intended as a |
| 384 | starting point. Many of the macros are unimplemented. Rather than |
| 385 | allowing unimplemented macros to silently do nothing, they cause an |
| 386 | assertion. Intention is to implement them on demand. |
| 387 | |
| 388 | The major use of these macros is to make visible to race detectors, |
| 389 | the behaviour (effects) of user-implemented synchronisation |
| 390 | primitives, that the detectors could not otherwise deduce from the |
| 391 | normal observation of pthread etc calls. |
| 392 | |
| 393 | Some of the macros are no-ops in Helgrind. That's because Helgrind |
| 394 | is a pure happens-before detector, whereas ThreadSanitizer uses a |
| 395 | hybrid lockset and happens-before scheme, which requires more |
| 396 | accurate annotations for correct operation. |
| 397 | |
| 398 | The macros are listed in the same order as in dynamic_annotations.h |
| 399 | (URL just above). |
| 400 | |
| 401 | I should point out that I am less than clear about the intended |
| 402 | semantics of quite a number of them. Comments and clarifications |
| 403 | welcomed! |
| 404 | */ |
| 405 | |
/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
| 434 | |
| 435 | /* ---------------------------------------------------------------- |
| 436 | Create completely arbitrary happens-before edges between threads. |
sewardj | 8c50d3c | 2011-03-11 18:38:12 +0000 | [diff] [blame] | 437 | |
| 438 | If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later |
| 439 | (w.r.t. some notional global clock for the computation) thread Tm |
| 440 | does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all |
| 441 | memory accesses done by T1 .. Tn before the ..BEFORE.. call as |
| 442 | happening-before all memory accesses done by Tm after the |
| 443 | ..AFTER.. call. Hence Helgrind won't complain about races if Tm's |
| 444 | accesses afterwards are to the same locations as accesses before by |
| 445 | any of T1 .. Tn. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 446 | |
| 447 | OBJ is a machine word (unsigned long, or void*), is completely |
| 448 | arbitrary, and denotes the identity of some synchronisation object |
| 449 | you're modelling. |
| 450 | |
| 451 | You must do the _BEFORE call just before the real sync event on the |
| 452 | signaller's side, and _AFTER just after the real sync event on the |
| 453 | waiter's side. |
| 454 | |
| 455 | If none of the rest of these macros make sense to you, at least |
| 456 | take the time to understand these two. They form the very essence |
| 457 | of describing arbitrary inter-thread synchronisation events to |
| 458 | Helgrind. You can get a long way just with them alone. |
sewardj | 8c50d3c | 2011-03-11 18:38:12 +0000 | [diff] [blame] | 459 | |
| 460 | See also, extensive discussion on semantics of this in |
| 461 | https://bugs.kde.org/show_bug.cgi?id=243935 |
sewardj | 6015d0e | 2011-03-11 19:10:48 +0000 | [diff] [blame] | 462 | |
| 463 | ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time |
| 464 | as bug 243935 is fully resolved. It instructs Helgrind to forget |
| 465 | about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in |
| 466 | effect putting it back in its original state. Once in that state, |
| 467 | a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling |
| 468 | thread. |
| 469 | |
| 470 | An implementation may optionally release resources it has |
| 471 | associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) |
| 472 | happens. Users are recommended to use |
| 473 | ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a |
| 474 | synchronisation object is no longer needed, so as to avoid |
| 475 | potential indefinite resource leaks. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 476 | ---------------------------------------------------------------- |
| 477 | */ |
/* Signaller side: record a release-style event on the arbitrary
   SO-tag 'obj'.  See the long comment above for semantics. */
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

/* Waiter side: record an acquire-style event on 'obj', creating a
   happens-before edge from all prior ..BEFORE.. calls on it. */
#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

/* Discard all ..BEFORE.. history for 'obj' (see comment above;
   interim facility pending resolution of bug 243935). */
#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 486 | |
| 487 | /* ---------------------------------------------------------------- |
| 488 | Memory publishing. The TSan sources say: |
| 489 | |
| 490 | Report that the bytes in the range [pointer, pointer+size) are about |
| 491 | to be published safely. The race checker will create a happens-before |
| 492 | arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to |
| 493 | subsequent accesses to this memory. |
| 494 | |
| 495 | I'm not sure I understand what this means exactly, nor whether it |
| 496 | is relevant for a pure h-b detector. Leaving unimplemented for |
| 497 | now. |
| 498 | ---------------------------------------------------------------- |
| 499 | */ |
/* Unimplemented (see discussion above); will complain if used. */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */
| 508 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 509 | |
| 510 | /* ---------------------------------------------------------------- |
| 511 | TSan sources say: |
| 512 | |
| 513 | Instruct the tool to create a happens-before arc between |
| 514 | MU->Unlock() and MU->Lock(). This annotation may slow down the |
| 515 | race detector; normally it is used only when it would be |
| 516 | difficult to annotate each of the mutex's critical sections |
| 517 | individually using the annotations above. |
| 518 | |
| 519 | If MU is a posix pthread_mutex_t then Helgrind will do this anyway. |
| 520 | In any case, leave as unimp for now. I'm unsure about the intended |
| 521 | behaviour. |
| 522 | ---------------------------------------------------------------- |
| 523 | */ |
/* Unimplemented (see discussion above); will complain if used. */
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 529 | |
| 530 | |
| 531 | /* ---------------------------------------------------------------- |
| 532 | TSan sources say: |
| 533 | |
| 534 | Annotations useful when defining memory allocators, or when |
| 535 | memory that was protected in one way starts to be protected in |
| 536 | another. |
| 537 | |
| 538 | Report that a new memory at "address" of size "size" has been |
| 539 | allocated. This might be used when the memory has been retrieved |
| 540 | from a free list and is about to be reused, or when a the locking |
| 541 | discipline for a variable changes. |
| 542 | |
| 543 | AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY. |
| 544 | ---------------------------------------------------------------- |
| 545 | */ |
/* Implemented as a direct alias of VALGRIND_HG_CLEAN_MEMORY (see
   the rationale in the comment just above). */
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
| 548 | |
| 549 | |
| 550 | /* ---------------------------------------------------------------- |
| 551 | TSan sources say: |
| 552 | |
| 553 | Annotations useful when defining FIFO queues that transfer data |
| 554 | between threads. |
| 555 | |
| 556 | All unimplemented. Am not claiming to understand this (yet). |
| 557 | ---------------------------------------------------------------- |
| 558 | */ |
| 559 | |
/* All four ANNOTATE_PCQ_* macros are unimplemented: expanding them
   only raises Helgrind's "unimplemented annotation" complaint, and
   the PCQ argument is ignored. */

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
| 580 | |
| 581 | |
| 582 | /* ---------------------------------------------------------------- |
| 583 | Annotations that suppress errors. It is usually better to express |
| 584 | the program's synchronization using the other annotations, but |
| 585 | these can be used when all else fails. |
| 586 | |
| 587 | Currently these are all unimplemented. I can't think of a simple |
| 588 | way to implement them without at least some performance overhead. |
| 589 | ---------------------------------------------------------------- |
| 590 | */ |
| 591 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 592 | /* Report that we may have a benign race at "pointer", with size |
| 593 | "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the |
| 594 | point where "pointer" has been allocated, preferably close to the point |
| 595 | where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 596 | |
| 597 | XXX: what's this actually supposed to do? And what's the type of |
| 598 | DESCRIPTION? When does the annotation stop having an effect? |
| 599 | */ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 600 | #define ANNOTATE_BENIGN_RACE(pointer, description) \ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 601 | _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE") |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 602 | |
| 603 | /* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to |
| 604 | the memory range [address, address+size). */ |
| 605 | #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ |
| 606 | _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE_SIZED") |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 607 | |
/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called. Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes.
   Unimplemented: expands only to the "unimplemented annotation"
   complaint. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads.  Unimplemented. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes.
   Unimplemented. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes.  Unimplemented. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes).  Composite
   of the two BEGIN macros above; do { } while (0) makes it safe as a
   single statement. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses.  Issues the two END requests in
   the reverse order of the BEGINs. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)
| 640 | |
| 641 | |
| 642 | /* ---------------------------------------------------------------- |
| 643 | Annotations useful for debugging. |
| 644 | |
   Again, so far unimplemented, partly for performance reasons.
| 646 | ---------------------------------------------------------------- |
| 647 | */ |
| 648 | |
/* Request to trace every access to ADDRESS.  Unimplemented; ADDRESS
   is ignored. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector.  Unimplemented;
   NAME is ignored. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
| 656 | |
| 657 | |
| 658 | /* ---------------------------------------------------------------- |
| 659 | Annotations for describing behaviour of user-implemented lock |
| 660 | primitives. In all cases, the LOCK argument is a completely |
| 661 | arbitrary machine word (unsigned long, or void*) and can be any |
| 662 | value which gives a unique identity to the lock objects being |
| 663 | modelled. |
| 664 | |
| 665 | We just pretend they're ordinary posix rwlocks. That'll probably |
| 666 | give some rather confusing wording in error messages, claiming that |
| 667 | the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact |
| 668 | they are not. Ah well. |
| 669 | ---------------------------------------------------------------- |
| 670 | */ |
/* Report that a lock has just been created at address LOCK.
   Modelled as a successful pthread_rwlock_init() on LOCK (see the
   section comment above). */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed.
   Modelled as an impending pthread_rwlock_destroy() on LOCK. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,    \
                void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released.
   IS_W is accepted for source compatibility with the TSan macro but
   is not passed through to Helgrind. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,    \
               void*,(lock)) /* is_w is ignored */
| 691 | |
| 692 | |
| 693 | /* ------------------------------------------------------------- |
| 694 | Annotations useful when implementing barriers. They are not |
| 695 | normally needed by modules that merely use barriers. |
| 696 | The "barrier" argument is a pointer to the barrier object. |
| 697 | ---------------------------------------------------------------- |
| 698 | */ |
| 699 | |
/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy().
   Unimplemented; all three arguments are ignored. */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
| 705 | |
/* Report that we are about to enter barrier_wait("barrier").
   Unimplemented.  Note: the request string must name THIS macro --
   it was previously a copy-paste of "ANNOTATE_BARRIER_DESTROY",
   which made the resulting "unimplemented annotation" report name
   the wrong annotation. */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")
| 709 | |
/* Report that we just exited barrier_wait("barrier").
   Unimplemented.  Note: the request string must name THIS macro --
   it was previously a copy-paste of "ANNOTATE_BARRIER_DESTROY",
   which made the resulting "unimplemented annotation" report name
   the wrong annotation. */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")
| 713 | |
/* Report that the "barrier" has been destroyed.  Unimplemented;
   BARRIER is ignored. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 717 | |
| 718 | |
| 719 | /* ---------------------------------------------------------------- |
| 720 | Annotations useful for testing race detectors. |
| 721 | ---------------------------------------------------------------- |
| 722 | */ |
| 723 | |
/* Report that we expect a race on the variable at ADDRESS. Use only
   in unit tests for a race detector.  Unimplemented; both arguments
   are ignored. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op. Insert where you like to test the interceptors.
   Unimplemented (still raises the "unimplemented annotation"
   complaint rather than being silent). */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
| 732 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 733 | /* Force the race detector to flush its state. The actual effect depends on |
| 734 | * the implementation of the detector. */ |
| 735 | #define ANNOTATE_FLUSH_STATE() \ |
| 736 | _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE") |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 737 | |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 738 | #endif /* __HELGRIND_H */ |