sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 1 | /* |
| 2 | ---------------------------------------------------------------- |
| 3 | |
| 4 | Notice that the above BSD-style license applies to this one file |
| 5 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 6 | the terms of the GNU General Public License, version 2. See the |
| 7 | COPYING file in the source distribution for details. |
| 8 | |
| 9 | ---------------------------------------------------------------- |
| 10 | |
| 11 | This file is part of Helgrind, a Valgrind tool for detecting errors |
| 12 | in threaded programs. |
| 13 | |
sewardj | 0f157dd | 2013-10-18 14:27:36 +0000 | [diff] [blame] | 14 | Copyright (C) 2007-2013 OpenWorks LLP |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 15 | info@open-works.co.uk |
| 16 | |
| 17 | Redistribution and use in source and binary forms, with or without |
| 18 | modification, are permitted provided that the following conditions |
| 19 | are met: |
| 20 | |
| 21 | 1. Redistributions of source code must retain the above copyright |
| 22 | notice, this list of conditions and the following disclaimer. |
| 23 | |
| 24 | 2. The origin of this software must not be misrepresented; you must |
| 25 | not claim that you wrote the original software. If you use this |
| 26 | software in a product, an acknowledgment in the product |
| 27 | documentation would be appreciated but is not required. |
| 28 | |
| 29 | 3. Altered source versions must be plainly marked as such, and must |
| 30 | not be misrepresented as being the original software. |
| 31 | |
| 32 | 4. The name of the author may not be used to endorse or promote |
| 33 | products derived from this software without specific prior written |
| 34 | permission. |
| 35 | |
| 36 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
| 37 | OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
| 38 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 39 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| 40 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 41 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE |
| 42 | GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 43 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
| 44 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 45 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 46 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 47 | |
| 48 | ---------------------------------------------------------------- |
| 49 | |
| 50 | Notice that the above BSD-style license applies to this one file |
| 51 | (helgrind.h) only. The entire rest of Valgrind is licensed under |
| 52 | the terms of the GNU General Public License, version 2. See the |
| 53 | COPYING file in the source distribution for details. |
| 54 | |
| 55 | ---------------------------------------------------------------- |
| 56 | */ |
| 57 | |
| 58 | #ifndef __HELGRIND_H |
| 59 | #define __HELGRIND_H |
| 60 | |
| 61 | #include "valgrind.h" |
| 62 | |
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end.

   (Request codes starting with an underscore are Helgrind-internal;
   the comment after each entry lists the argument types the request
   carries.) */
typedef
   enum {
      /* The only public (end-user) request: see
         VALGRIND_HG_CLEAN_MEMORY below. */
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      /* Internal codes start at tool-base + 256 so that future public
         codes can be added after VG_USERREQ__HG_CLEAN_MEMORY. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,  /* void*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,    /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,    /* void*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,    /* void* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_RELEASED,         /* void* */
      _VG_USERREQ__HG_POSIX_SEM_ACQUIRED,         /* void* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,   /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST,  /* pth_cond_t*, pth_cond_attr_t*/
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,       /* void*d,void*m,Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */
      _VG_USERREQ__HG_GET_ABITS,              /* Addr a,Addr abits, ulong len */
      _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN,
      _VG_USERREQ__HG_PTHREAD_CREATE_END,
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,    /* pth_mx_t*,long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,   /* pth_mx_t *,long tookLock */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,  /* pth_rwlk_t*,long isW,long */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,        /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_POST,       /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE,        /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,       /* sem_t*, long tookLock */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST,  /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */
      _VG_USERREQ__HG_RTLD_BIND_GUARD,           /* int flags */
      _VG_USERREQ__HG_RTLD_BIND_CLEAR            /* int flags */
   } Vg_TCheckClientRequest;
| 138 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 139 | |
| 140 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 141 | /*--- ---*/ |
| 142 | /*--- Implementation-only facilities. Not for end-user use. ---*/ |
| 143 | /*--- For end-user facilities see below (the next section in ---*/ |
| 144 | /*--- this file.) ---*/ |
| 145 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 146 | /*----------------------------------------------------------------*/ |
| 147 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 148 | /* Do a client request. These are macros rather than a functions so |
| 149 | as to avoid having an extra frame in stack traces. |
| 150 | |
| 151 | NB: these duplicate definitions in hg_intercepts.c. But here, we |
| 152 | have to make do with weaker typing (no definition of Word etc) and |
| 153 | no assertions, whereas in helgrind.h we can use those facilities. |
| 154 | Obviously it's important the two sets of definitions are kept in |
| 155 | sync. |
| 156 | |
| 157 | The commented-out asserts should actually hold, but unfortunately |
| 158 | they can't be allowed to be visible here, because that would |
| 159 | require the end-user code to #include <assert.h>. |
| 160 | */ |
| 161 | |
/* Issue a client request with one word-sized argument and no result.
   The commented-out assert should hold, but cannot be made visible
   here, since that would force end-user code to #include <assert.h>. */
#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      long int _w1 = (long int)(_arg1F);                 \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
         (_creqF), _w1, 0,0,0,0);                        \
   } while (0)
| 171 | |
/* Issue a client request with one word-sized argument, yielding a
   word-sized result in _resF.  _dfltF is the value produced when not
   running under Valgrind.
   FIX: declare _qzz_res, which was used (assigned and read below)
   but never declared, making every expansion site a compile error. */
#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F)  \
   do {                                                   \
      long int _qzz_res;                                  \
      long int _arg1;                                     \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */    \
      _arg1 = (long int)(_arg1F);                         \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(         \
                            (_dfltF),                     \
                            (_creqF),                     \
                            _arg1, 0,0,0,0);              \
      _resF = _qzz_res;                                   \
   } while (0)
| 183 | |
/* Issue a client request with two word-sized arguments and no result. */
#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F)  \
   do {                                                   \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */    \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */    \
      long int _w1 = (long int)(_arg1F);                  \
      long int _w2 = (long int)(_arg2F);                  \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                    \
         (_creqF), _w1,_w2, 0,0,0);                       \
   } while (0)
| 195 | |
/* Issue a client request with three word-sized arguments and no
   result. */
#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,               \
                      _ty2F,_arg2F, _ty3F, _arg3F)        \
   do {                                                   \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */    \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */    \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */    \
      long int _w1 = (long int)(_arg1F);                  \
      long int _w2 = (long int)(_arg2F);                  \
      long int _w3 = (long int)(_arg3F);                  \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                    \
         (_creqF), _w1,_w2,_w3, 0,0);                     \
   } while (0)
| 210 | |
/* Issue a client request with three word-sized arguments, yielding a
   word-sized result in _resF.  _dfltF is the value produced when not
   running under Valgrind. */
#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
                      _ty2F,_arg2F, _ty3F, _arg3F)         \
   do {                                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */     \
      long int _w1 = (long int)(_arg1F);                   \
      long int _w2 = (long int)(_arg2F);                   \
      long int _w3 = (long int)(_arg3F);                   \
      long int _ret = VALGRIND_DO_CLIENT_REQUEST_EXPR(     \
                         (_dfltF), (_creqF),               \
                         _w1,_w2,_w3, 0,0);                \
      _resF = _ret;                                        \
   } while (0)
| 226 | |
| 227 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 228 | |
/* Report an unimplemented annotation: sends the annotation's name
   (a string) to Helgrind via _VG_USERREQ__HG_CLIENTREQ_UNIMP.  Used
   by the ANNOTATE_* macros below that are not yet implemented. */
#define _HG_CLIENTREQ_UNIMP(_qzz_str)                     \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,           \
               (char*),(_qzz_str))
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 232 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 233 | |
| 234 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 235 | /*--- ---*/ |
| 236 | /*--- Helgrind-native requests. These allow access to ---*/ |
| 237 | /*--- the same set of annotation primitives that are used ---*/ |
| 238 | /*--- to build the POSIX pthread wrappers. ---*/ |
| 239 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 240 | /*----------------------------------------------------------------*/ |
| 241 | |
/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.

   NOTE(review): the _mutex arguments are passed to Helgrind as
   plain void* addresses; presumably they serve only to identify
   the lock -- confirm against hg_main.c.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,   \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST,   \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
| 278 | |
/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation.  _value is the
   semaphore's initial count. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation) */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED,           \
               void*,(_sem))

/* Notify here immediately before semaphore post (a release-style
   operation) */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))
| 304 | |
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,     \
                 void*,(_bar),                                 \
                 unsigned long,(_count),                       \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                     \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,       \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)        \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,    \
                void*,(_bar),                                  \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,    \
               void*,(_bar))
| 340 | |
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found), and the value -1 indicates block not
   found, and -2 is returned when not running on Helgrind.
   (Uses a GNU statement expression, hence the __extension__.) */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                 void*,(_qzz_blockstart));                   \
     _npainted;                                              \
    }))
| 373 | |
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell H that an address range is not to be "tracked" until further
   notice.  This puts it in the NOACCESS state, in which case we
   ignore all reads and writes to it.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 399 | |
| 400 | |
/* (A second, token-identical definition of VALGRIND_HG_ENABLE_CHECKING
   appeared here.  Identical redefinition is legal C (C11 6.10.3p2) but
   redundant and confusing; removed.  The macro is defined once, just
   above.) */
| 405 | |
| 406 | |
/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If zzabits array is provided, copy the accessibility bits in zzabits.
   Return values:
     -2  if not running on helgrind
     -1  if any parts of zzabits is not addressable
     >= 0 : success.
   When success, it returns the nr of addressable bytes found.
   So, to check that a whole range is addressable, check
     VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
   In addition, if you want to examine the addressability of each
   byte of the range, you need to provide a non NULL ptr as
   second argument, pointing to an array of unsigned char
   of length len.
   Addressable bytes are indicated with 0xff.
   Non-addressable bytes are indicated with 0x00.
   (Uses a GNU statement expression, hence the __extension__.)
*/
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)          \
   (__extension__                                            \
   ({long int _res;                                          \
      DO_CREQ_W_WWW(_res, (-2)/*default*/,                   \
                    _VG_USERREQ__HG_GET_ABITS,               \
                    void*,(zza), void*,(zzabits),            \
                    unsigned long,(zznbytes));               \
      _res;                                                  \
   }))
| 432 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 433 | /*----------------------------------------------------------------*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 434 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 435 | /*--- ThreadSanitizer-compatible requests ---*/ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 436 | /*--- (mostly unimplemented) ---*/ |
| 437 | /*--- ---*/ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 438 | /*----------------------------------------------------------------*/ |
| 439 | |
| 440 | /* A quite-broad set of annotations, as used in the ThreadSanitizer |
| 441 | project. This implementation aims to be a (source-level) |
| 442 | compatible implementation of the macros defined in: |
| 443 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 444 | http://code.google.com/p/data-race-test/source |
| 445 | /browse/trunk/dynamic_annotations/dynamic_annotations.h |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 446 | |
| 447 | (some of the comments below are taken from the above file) |
| 448 | |
| 449 | The implementation here is very incomplete, and intended as a |
| 450 | starting point. Many of the macros are unimplemented. Rather than |
| 451 | allowing unimplemented macros to silently do nothing, they cause an |
| 452 | assertion. Intention is to implement them on demand. |
| 453 | |
| 454 | The major use of these macros is to make visible to race detectors, |
| 455 | the behaviour (effects) of user-implemented synchronisation |
| 456 | primitives, that the detectors could not otherwise deduce from the |
| 457 | normal observation of pthread etc calls. |
| 458 | |
| 459 | Some of the macros are no-ops in Helgrind. That's because Helgrind |
| 460 | is a pure happens-before detector, whereas ThreadSanitizer uses a |
| 461 | hybrid lockset and happens-before scheme, which requires more |
| 462 | accurate annotations for correct operation. |
| 463 | |
| 464 | The macros are listed in the same order as in dynamic_annotations.h |
| 465 | (URL just above). |
| 466 | |
| 467 | I should point out that I am less than clear about the intended |
| 468 | semantics of quite a number of them. Comments and clarifications |
| 469 | welcomed! |
| 470 | */ |
| 471 | |
/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
| 499 | |
| 500 | |
| 501 | /* ---------------------------------------------------------------- |
| 502 | Create completely arbitrary happens-before edges between threads. |
sewardj | 8c50d3c | 2011-03-11 18:38:12 +0000 | [diff] [blame] | 503 | |
| 504 | If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later |
| 505 | (w.r.t. some notional global clock for the computation) thread Tm |
| 506 | does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all |
| 507 | memory accesses done by T1 .. Tn before the ..BEFORE.. call as |
| 508 | happening-before all memory accesses done by Tm after the |
| 509 | ..AFTER.. call. Hence Helgrind won't complain about races if Tm's |
| 510 | accesses afterwards are to the same locations as accesses before by |
| 511 | any of T1 .. Tn. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 512 | |
| 513 | OBJ is a machine word (unsigned long, or void*), is completely |
| 514 | arbitrary, and denotes the identity of some synchronisation object |
| 515 | you're modelling. |
| 516 | |
| 517 | You must do the _BEFORE call just before the real sync event on the |
| 518 | signaller's side, and _AFTER just after the real sync event on the |
| 519 | waiter's side. |
| 520 | |
| 521 | If none of the rest of these macros make sense to you, at least |
| 522 | take the time to understand these two. They form the very essence |
| 523 | of describing arbitrary inter-thread synchronisation events to |
| 524 | Helgrind. You can get a long way just with them alone. |
sewardj | 8c50d3c | 2011-03-11 18:38:12 +0000 | [diff] [blame] | 525 | |
| 526 | See also, extensive discussion on semantics of this in |
| 527 | https://bugs.kde.org/show_bug.cgi?id=243935 |
sewardj | 6015d0e | 2011-03-11 19:10:48 +0000 | [diff] [blame] | 528 | |
| 529 | ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time |
| 530 | as bug 243935 is fully resolved. It instructs Helgrind to forget |
| 531 | about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in |
| 532 | effect putting it back in its original state. Once in that state, |
| 533 | a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling |
| 534 | thread. |
| 535 | |
| 536 | An implementation may optionally release resources it has |
| 537 | associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) |
| 538 | happens. Users are recommended to use |
| 539 | ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a |
| 540 | synchronisation object is no longer needed, so as to avoid |
| 541 | potential indefinite resource leaks. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 542 | ---------------------------------------------------------------- |
| 543 | */ |
/* Signaller's side of a user-defined happens-before arc: place just
   before the real sync event.  OBJ is an arbitrary machine word
   identifying the modelled synchronisation object. */
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 546 | |
/* Waiter's side of a user-defined happens-before arc: place just
   after the real sync event, with the same OBJ as the matching
   ANNOTATE_HAPPENS_BEFORE. */
#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 549 | |
sewardj | 6015d0e | 2011-03-11 19:10:48 +0000 | [diff] [blame] | 550 | #define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \ |
| 551 | DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj)) |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 552 | |
| 553 | /* ---------------------------------------------------------------- |
| 554 | Memory publishing. The TSan sources say: |
| 555 | |
| 556 | Report that the bytes in the range [pointer, pointer+size) are about |
| 557 | to be published safely. The race checker will create a happens-before |
| 558 | arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to |
| 559 | subsequent accesses to this memory. |
| 560 | |
| 561 | I'm not sure I understand what this means exactly, nor whether it |
| 562 | is relevant for a pure h-b detector. Leaving unimplemented for |
| 563 | now. |
| 564 | ---------------------------------------------------------------- |
| 565 | */ |
/* Unimplemented: expands to an "unimplemented annotation" client
   request; see rationale in the comment above. */
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")
| 568 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 569 | /* DEPRECATED. Don't use it. */ |
| 570 | /* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */ |
| 571 | |
| 572 | /* DEPRECATED. Don't use it. */ |
| 573 | /* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */ |
| 574 | |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 575 | |
| 576 | /* ---------------------------------------------------------------- |
| 577 | TSan sources say: |
| 578 | |
| 579 | Instruct the tool to create a happens-before arc between |
| 580 | MU->Unlock() and MU->Lock(). This annotation may slow down the |
| 581 | race detector; normally it is used only when it would be |
| 582 | difficult to annotate each of the mutex's critical sections |
| 583 | individually using the annotations above. |
| 584 | |
| 585 | If MU is a posix pthread_mutex_t then Helgrind will do this anyway. |
| 586 | In any case, leave as unimp for now. I'm unsure about the intended |
| 587 | behaviour. |
| 588 | ---------------------------------------------------------------- |
| 589 | */ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 590 | #define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \ |
| 591 | _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX") |
| 592 | |
| 593 | /* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */ |
| 594 | /* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 595 | |
| 596 | |
| 597 | /* ---------------------------------------------------------------- |
| 598 | TSan sources say: |
| 599 | |
| 600 | Annotations useful when defining memory allocators, or when |
| 601 | memory that was protected in one way starts to be protected in |
| 602 | another. |
| 603 | |
| 604 | Report that a new memory at "address" of size "size" has been |
| 605 | allocated. This might be used when the memory has been retrieved |
from a free list and is about to be reused, or when the locking
| 607 | discipline for a variable changes. |
| 608 | |
| 609 | AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY. |
| 610 | ---------------------------------------------------------------- |
| 611 | */ |
/* Mark [address, address+size) as freshly allocated / reused memory:
   implemented directly in terms of VALGRIND_HG_CLEAN_MEMORY. */
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))
| 614 | |
| 615 | |
| 616 | /* ---------------------------------------------------------------- |
| 617 | TSan sources say: |
| 618 | |
| 619 | Annotations useful when defining FIFO queues that transfer data |
| 620 | between threads. |
| 621 | |
| 622 | All unimplemented. Am not claiming to understand this (yet). |
| 623 | ---------------------------------------------------------------- |
| 624 | */ |
| 625 | |
/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get).
   Unimplemented in Helgrind (see section comment above). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")
| 632 | |
/* Report that the queue at address PCQ is about to be destroyed.
   Unimplemented in Helgrind. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")
| 636 | |
/* Report that we are about to put an element into a FIFO queue at
   address PCQ.  Unimplemented in Helgrind. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")
| 641 | |
/* Report that we've just got an element from a FIFO queue at address
   PCQ.  Unimplemented in Helgrind. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")
| 646 | |
| 647 | |
| 648 | /* ---------------------------------------------------------------- |
| 649 | Annotations that suppress errors. It is usually better to express |
| 650 | the program's synchronization using the other annotations, but |
| 651 | these can be used when all else fails. |
| 652 | |
| 653 | Currently these are all unimplemented. I can't think of a simple |
| 654 | way to implement them without at least some performance overhead. |
| 655 | ---------------------------------------------------------------- |
| 656 | */ |
| 657 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 658 | /* Report that we may have a benign race at "pointer", with size |
| 659 | "sizeof(*(pointer))". "pointer" must be a non-void* pointer. Insert at the |
| 660 | point where "pointer" has been allocated, preferably close to the point |
| 661 | where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 662 | |
| 663 | XXX: what's this actually supposed to do? And what's the type of |
| 664 | DESCRIPTION? When does the annotation stop having an effect? |
| 665 | */ |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 666 | #define ANNOTATE_BENIGN_RACE(pointer, description) \ |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 667 | _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE") |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 668 | |
/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size).  Implemented by disabling
   Helgrind's checking of that range.  The "description" argument is
   ignored.  Arguments are parenthesised in the expansion (macro
   hygiene), consistent with ANNOTATE_NEW_MEMORY above. */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING((address), (size))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 673 | |
/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racey reads, while still checking other reads and all
   writes.  Unimplemented in Helgrind (see section comment above). */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")
| 680 | |
/* Stop ignoring reads.  Unimplemented in Helgrind. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")
| 684 | |
/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes.
   Unimplemented in Helgrind. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")
| 688 | |
/* Stop ignoring writes.  Unimplemented in Helgrind. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")
| 692 | |
/* Start ignoring all memory accesses (reads and writes): composed
   from the two BEGIN macros above, wrapped in do/while(0) so it is a
   single statement.  Effectively unimplemented, since both components
   are. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)
| 699 | |
/* Stop ignoring all memory accesses: composed from the two END
   macros above, in reverse order of the BEGIN pairing.  Effectively
   unimplemented, since both components are. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)
| 706 | |
| 707 | |
| 708 | /* ---------------------------------------------------------------- |
| 709 | Annotations useful for debugging. |
| 710 | |
Again, so far unimplemented, partly for performance reasons.
| 712 | ---------------------------------------------------------------- |
| 713 | */ |
| 714 | |
/* Request to trace every access to ADDRESS.  Unimplemented in
   Helgrind (see section comment above). */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")
| 718 | |
/* Report the current thread name to a race detector.  Unimplemented
   in Helgrind. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")
| 722 | |
| 723 | |
| 724 | /* ---------------------------------------------------------------- |
| 725 | Annotations for describing behaviour of user-implemented lock |
| 726 | primitives. In all cases, the LOCK argument is a completely |
| 727 | arbitrary machine word (unsigned long, or void*) and can be any |
| 728 | value which gives a unique identity to the lock objects being |
| 729 | modelled. |
| 730 | |
| 731 | We just pretend they're ordinary posix rwlocks. That'll probably |
| 732 | give some rather confusing wording in error messages, claiming that |
| 733 | the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact |
| 734 | they are not. Ah well. |
| 735 | ---------------------------------------------------------------- |
| 736 | */ |
/* Report that a lock has just been created at address LOCK.  Mapped
   to Helgrind's pthread_rwlock_init handling (see comment above). */
#define ANNOTATE_RWLOCK_CREATE(lock) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST, \
               void*,(lock))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 741 | |
/* Report that the lock at address LOCK is about to be destroyed.
   Mapped to Helgrind's pthread_rwlock_destroy handling. */
#define ANNOTATE_RWLOCK_DESTROY(lock) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, \
               void*,(lock))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 746 | |
/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock.  Both words are
   passed to the tool via a two-argument client request. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED, \
                void*,(lock), unsigned long,(is_w))
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 752 | |
/* Report that the lock at address LOCK is about to be released.
   Note: only LOCK is passed to the tool; the is_w argument is
   accepted for API compatibility but ignored. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED, \
               void*,(lock)) /* is_w is ignored */
| 757 | |
| 758 | |
| 759 | /* ------------------------------------------------------------- |
| 760 | Annotations useful when implementing barriers. They are not |
| 761 | normally needed by modules that merely use barriers. |
| 762 | The "barrier" argument is a pointer to the barrier object. |
| 763 | ---------------------------------------------------------------- |
| 764 | */ |
| 765 | |
/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy().
   Unimplemented in Helgrind. */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")
| 771 | |
/* Report that we are about to enter barrier_wait("barrier").
   Unimplemented in Helgrind.  (Fixed a copy-paste bug: the
   unimplemented-annotation message previously named
   ANNOTATE_BARRIER_DESTROY, so users were warned about the wrong
   macro.) */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")
| 775 | |
/* Report that we just exited barrier_wait("barrier").
   Unimplemented in Helgrind.  (Fixed a copy-paste bug: the
   unimplemented-annotation message previously named
   ANNOTATE_BARRIER_DESTROY, so users were warned about the wrong
   macro.) */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")
| 779 | |
/* Report that the "barrier" has been destroyed.  Unimplemented in
   Helgrind. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 783 | |
| 784 | |
| 785 | /* ---------------------------------------------------------------- |
| 786 | Annotations useful for testing race detectors. |
| 787 | ---------------------------------------------------------------- |
| 788 | */ |
| 789 | |
/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector.  Unimplemented in Helgrind. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")
| 794 | |
/* A no-op.  Insert where you like to test the interceptors.
   Unimplemented in Helgrind. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")
| 798 | |
sewardj | 406bac8 | 2010-03-03 23:03:40 +0000 | [diff] [blame] | 799 | /* Force the race detector to flush its state. The actual effect depends on |
| 800 | * the implementation of the detector. */ |
| 801 | #define ANNOTATE_FLUSH_STATE() \ |
| 802 | _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE") |
sewardj | ed2e72e | 2009-08-14 11:08:24 +0000 | [diff] [blame] | 803 | |
sewardj | b411202 | 2007-11-09 22:49:28 +0000 | [diff] [blame] | 804 | #endif /* __HELGRIND_H */ |