//******************************************************************************
// include files
//******************************************************************************

#include "kmp.h"
#include "ompt-internal.h"
#include "ompt-specific.h"

//******************************************************************************
// macros
//******************************************************************************

#define GTID_TO_OMPT_THREAD_ID(id) ((ompt_thread_id_t) (((id) >= 0) ? (id) + 1 : 0))

#define LWT_FROM_TEAM(team) ((team)->t.ompt_serialized_team_info)

#define OMPT_THREAD_ID_BITS 16

// 2013 08 24 - John Mellor-Crummey
// ideally, a thread should assign its own ids based on thread private data.
// however, the way the intel runtime reinitializes thread data structures
// when it creates teams makes it difficult to maintain persistent thread
// data. using a shared variable instead is simple. I leave it to intel to
// sort out how to implement a higher performance version in their runtime.

// when using fetch_and_add to generate the IDs, there isn't any reason to waste
// bits for thread id.
#if 0
#define NEXT_ID(id_ptr,tid) \
    ((KMP_TEST_THEN_INC64(id_ptr) << OMPT_THREAD_ID_BITS) | (tid))
#else
#define NEXT_ID(id_ptr,tid) (KMP_TEST_THEN_INC64((volatile kmp_int64 *)id_ptr))
#endif

//******************************************************************************
// private operations
//******************************************************************************

//----------------------------------------------------------
// traverse the team and task hierarchy
// note: __ompt_get_teaminfo and __ompt_get_taskinfo
//       traverse the hierarchy similarly and need to be
//       kept consistent
//----------------------------------------------------------

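// __ompt_get_teaminfo: walk 'depth' levels up the team hierarchy, consuming
// any lightweight (serialized) teams before moving to the enclosing
// heavyweight teams. If 'size' is non-NULL, it receives the size of the team
// found (1 for a lightweight team). Returns NULL if no team exists at that
// depth.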
ompt_team_info_t *
__ompt_get_teaminfo(int depth, int *size)
{
    kmp_info_t *thr = ompt_get_thread();

    if (thr) {
        kmp_team *team = thr->th.th_team;
        if (team == NULL) return NULL;

        ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(team);

        while (depth > 0) {
            // next lightweight team (if any)
            if (lwt) lwt = lwt->parent;

            // next heavyweight team (if any) after
            // lightweight teams are exhausted
            if (!lwt && team) team = team->t.t_parent;

            depth--;
        }

        if (lwt) {
            // lightweight teams have one task
            if (size) *size = 1;

            // return team info for lightweight team
            return &lwt->ompt_team_info;
        } else if (team) {
            // extract size from heavyweight team
            if (size) *size = team->t.t_nproc;

            // return team info for heavyweight team
            return &team->t.ompt_team_info;
        }
    }

    return NULL;
}


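// __ompt_get_taskinfo: walk 'depth' levels up the task hierarchy, preferring
// lightweight (serialized) task teams before falling back to the parent
// kmp_taskdata_t chain. Returns NULL if no task exists at that depth.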
ompt_task_info_t *
__ompt_get_taskinfo(int depth)
{
    ompt_task_info_t *info = NULL;
    kmp_info_t *thr = ompt_get_thread();

    if (thr) {
        kmp_taskdata_t *taskdata = thr->th.th_current_task;
        ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(taskdata->td_team);

        while (depth > 0) {
            // next lightweight team (if any)
            if (lwt) lwt = lwt->parent;

            // next heavyweight team (if any) after
            // lightweight teams are exhausted
            if (!lwt && taskdata) {
                taskdata = taskdata->td_parent;
                if (taskdata) {
                    lwt = LWT_FROM_TEAM(taskdata->td_team);
                }
            }
            depth--;
        }

        if (lwt) {
            info = &lwt->ompt_task_info;
        } else if (taskdata) {
            info = &taskdata->ompt_task_info;
        }
    }

    return info;
}


//******************************************************************************
// interface operations
//******************************************************************************

//----------------------------------------------------------
// thread support
//----------------------------------------------------------

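// __ompt_thread_id_new: atomically generate the next OMPT thread id from a
// shared counter.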
ompt_thread_id_t
__ompt_thread_id_new()
{
    static uint64_t ompt_thread_id = 1;
    return NEXT_ID(&ompt_thread_id, 0);
}

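// __ompt_thread_begin / __ompt_thread_end: invoke the OMPT thread_begin and
// thread_end event callbacks for the given global thread id.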
void
__ompt_thread_begin(ompt_thread_type_t thread_type, int gtid)
{
    ompt_callbacks.ompt_callback(ompt_event_thread_begin)(
        thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}


void
__ompt_thread_end(ompt_thread_type_t thread_type, int gtid)
{
    ompt_callbacks.ompt_callback(ompt_event_thread_end)(
        thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}


ompt_thread_id_t
__ompt_get_thread_id_internal()
{
    // FIXME
    // until we have a better way of assigning ids, use __kmp_get_gtid
    // since the return value might be negative, we need to test that before
    // assigning it to an ompt_thread_id_t, which is unsigned.
    int id = __kmp_get_gtid();
    assert(id >= 0);

    return GTID_TO_OMPT_THREAD_ID(id);
}

//----------------------------------------------------------
// state support
//----------------------------------------------------------

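// __ompt_thread_assign_wait_id: record the synchronization object the current
// thread is waiting on, so it can later be reported through
// __ompt_get_state_internal.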
void
__ompt_thread_assign_wait_id(void *variable)
{
    int gtid = __kmp_gtid_get_specific();
    kmp_info_t *ti = ompt_get_thread_gtid(gtid);

    ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t) variable;
}

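// __ompt_get_state_internal: return the current thread's OMPT state and, if
// requested, the wait id recorded for it; ompt_state_undefined if the thread
// is unknown.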
ompt_state_t
__ompt_get_state_internal(ompt_wait_id_t *ompt_wait_id)
{
    kmp_info_t *ti = ompt_get_thread();

    if (ti) {
        if (ompt_wait_id)
            *ompt_wait_id = ti->th.ompt_thread_info.wait_id;
        return ti->th.ompt_thread_info.state;
    }
    return ompt_state_undefined;
}

//----------------------------------------------------------
// idle frame support
//----------------------------------------------------------

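// __ompt_get_idle_frame_internal: return the frame pointer recorded for the
// current thread's idle state, or NULL if the thread is unknown.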
void *
__ompt_get_idle_frame_internal(void)
{
    kmp_info_t *ti = ompt_get_thread();
    return ti ? ti->th.ompt_thread_info.idle_frame : NULL;
}


//----------------------------------------------------------
// parallel region support
//----------------------------------------------------------

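// __ompt_parallel_id_new: atomically generate the next parallel region id
// from a shared counter; returns 0 for an invalid (negative) gtid.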
ompt_parallel_id_t
__ompt_parallel_id_new(int gtid)
{
    static uint64_t ompt_parallel_id = 1;
    return gtid >= 0 ? NEXT_ID(&ompt_parallel_id, gtid) : 0;
}


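// The following queries map a hierarchy depth to properties of the enclosing
// parallel region via __ompt_get_teaminfo: its microtask entry point, its
// parallel id, and its team size. Each returns a NULL/0/-1 sentinel when no
// team exists at the requested depth.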
void *
__ompt_get_parallel_function_internal(int depth)
{
    ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
    void *function = info ? info->microtask : NULL;
    return function;
}


ompt_parallel_id_t
__ompt_get_parallel_id_internal(int depth)
{
    ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
    ompt_parallel_id_t id = info ? info->parallel_id : 0;
    return id;
}


int
__ompt_get_parallel_team_size_internal(int depth)
{
    // initialize the return value with the error value.
    // if there is a team at the specified depth, the default
    // value will be overwritten with the size of that team.
    int size = -1;
    (void) __ompt_get_teaminfo(depth, &size);
    return size;
}


//----------------------------------------------------------
// lightweight task team support
//----------------------------------------------------------

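// __ompt_lw_taskteam_init: initialize an ompt_lw_taskteam_t describing a
// serialized parallel region: record its parallel id and microtask, and clear
// the embedded task info and parent link.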
void
__ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                        int gtid, void *microtask,
                        ompt_parallel_id_t ompt_pid)
{
    lwt->ompt_team_info.parallel_id = ompt_pid;
    lwt->ompt_team_info.microtask = microtask;
    lwt->ompt_task_info.task_id = 0;
    lwt->ompt_task_info.frame.reenter_runtime_frame = 0;
    lwt->ompt_task_info.frame.exit_runtime_frame = 0;
    lwt->ompt_task_info.function = NULL;
    lwt->parent = 0;
}


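// __ompt_lw_taskteam_link / __ompt_lw_taskteam_unlink: push and pop a
// lightweight task team on the thread's stack of serialized team info,
// anchored at th_team->t.ompt_serialized_team_info.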
void
__ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr)
{
    ompt_lw_taskteam_t *my_parent = thr->th.th_team->t.ompt_serialized_team_info;
    lwt->parent = my_parent;
    thr->th.th_team->t.ompt_serialized_team_info = lwt;
}


ompt_lw_taskteam_t *
__ompt_lw_taskteam_unlink(kmp_info_t *thr)
{
    ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
    if (lwtask) thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
    return lwtask;
}


//----------------------------------------------------------
// task support
//----------------------------------------------------------

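// __ompt_task_id_new: atomically generate the next task id from a shared
// counter. The queries below map a hierarchy depth to properties of the
// enclosing task via __ompt_get_taskinfo: its id, its entry function, and
// its frame information.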
ompt_task_id_t
__ompt_task_id_new(int gtid)
{
    static uint64_t ompt_task_id = 1;
    return NEXT_ID(&ompt_task_id, gtid);
}


ompt_task_id_t
__ompt_get_task_id_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    ompt_task_id_t task_id = info ? info->task_id : 0;
    return task_id;
}


void *
__ompt_get_task_function_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    void *function = info ? info->function : NULL;
    return function;
}


ompt_frame_t *
__ompt_get_task_frame_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    ompt_frame_t *frame = info ? &info->frame : NULL;
    return frame;
}


//----------------------------------------------------------
// team support
//----------------------------------------------------------

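// __ompt_team_assign_id: stamp a heavyweight kmp_team_t with the parallel id
// generated for the region it executes.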
void
__ompt_team_assign_id(kmp_team_t *team, ompt_parallel_id_t ompt_pid)
{
    team->t.ompt_team_info.parallel_id = ompt_pid;
}