//******************************************************************************
// include files
//******************************************************************************

#include "kmp.h"
#include "ompt-internal.h"
#include "ompt-specific.h"

//******************************************************************************
// macros
//******************************************************************************

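// map a gtid to an OMPT thread id: valid gtids (>= 0) become 1-based ids,
// while a negative gtid (thread not yet known to the runtime) maps to 0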
#define GTID_TO_OMPT_THREAD_ID(id) \
    ((id) >= 0 ? (ompt_thread_id_t)((id) + 1) : (ompt_thread_id_t)0)

#define LWT_FROM_TEAM(team) ((team)->t.ompt_serialized_team_info)

#define OMPT_THREAD_ID_BITS 16

// 2013 08 24 - John Mellor-Crummey
// Ideally, a thread should assign its own ids based on thread-private data.
// However, the way the Intel runtime reinitializes thread data structures
// when it creates teams makes it difficult to maintain persistent thread
// data. Using a shared variable instead is simple. I leave it to Intel to
// sort out how to implement a higher-performance version in their runtime.

// When using fetch_and_add to generate the IDs, there isn't any reason to
// waste bits for the thread id.
#if 0
#define NEXT_ID(id_ptr,tid) \
    ((KMP_TEST_THEN_INC64(id_ptr) << OMPT_THREAD_ID_BITS) | (tid))
#else
#define NEXT_ID(id_ptr,tid) (KMP_TEST_THEN_INC64((volatile kmp_int64 *)id_ptr))
#endif
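// Illustrative sketch only (not part of the runtime): with the fetch-and-add
// variant enabled above, successive calls on a counter initialized to 1 simply
// return 1, 2, 3, ...; the disabled variant would instead place the counter in
// the high bits and the caller's tid in the low OMPT_THREAD_ID_BITS bits:
//
//   static uint64_t counter = 1;
//   uint64_t id0 = NEXT_ID(&counter, 5);   // 1  (packed variant: (1 << 16) | 5)
//   uint64_t id1 = NEXT_ID(&counter, 5);   // 2  (packed variant: (2 << 16) | 5)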

//******************************************************************************
// private operations
//******************************************************************************

//----------------------------------------------------------
// traverse the team and task hierarchy
// note: __ompt_get_teaminfo and __ompt_get_taskinfo
//       traverse the hierarchy similarly and need to be
//       kept consistent
//----------------------------------------------------------

ompt_team_info_t *
__ompt_get_teaminfo(int depth, int *size)
{
    kmp_info_t *thr = ompt_get_thread();

    if (thr) {
        kmp_team *team = thr->th.th_team;
        if (team == NULL) return NULL;

        ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(team);

        while (depth > 0) {
            // next lightweight team (if any)
            if (lwt) lwt = lwt->parent;

            // next heavyweight team (if any) after
            // lightweight teams are exhausted
            if (!lwt && team) {
                team = team->t.t_parent;
                if (team) {
                    lwt = LWT_FROM_TEAM(team);
                }
            }

            depth--;
        }

        if (lwt) {
            // lightweight teams have one task
            if (size) *size = 1;

            // return team info for lightweight team
            return &lwt->ompt_team_info;
        } else if (team) {
            // extract size from heavyweight team
            if (size) *size = team->t.t_nproc;

            // return team info for heavyweight team
            return &team->t.ompt_team_info;
        }
    }

    return NULL;
}


ompt_task_info_t *
__ompt_get_taskinfo(int depth)
{
    ompt_task_info_t *info = NULL;
    kmp_info_t *thr = ompt_get_thread();

    if (thr) {
        kmp_taskdata_t *taskdata = thr->th.th_current_task;
        ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(taskdata->td_team);

        while (depth > 0) {
            // next lightweight team (if any)
            if (lwt) lwt = lwt->parent;

            // next heavyweight team (if any) after
            // lightweight teams are exhausted
            if (!lwt && taskdata) {
                taskdata = taskdata->td_parent;
                if (taskdata) {
                    lwt = LWT_FROM_TEAM(taskdata->td_team);
                }
            }
            depth--;
        }

        if (lwt) {
            info = &lwt->ompt_task_info;
        } else if (taskdata) {
            info = &taskdata->ompt_task_info;
        }
    }

    return info;
}
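// A minimal usage sketch (illustrative only): the OMPT inquiry entry points a
// tool calls, e.g. ompt_get_parallel_id(ancestor_level) and
// ompt_get_task_id(depth), are assumed to reach the two traversals above via
// the *_internal wrappers below, which is why both walks must agree on how a
// given depth maps to an ancestor:
//
//   ompt_parallel_id_t enclosing_region = ompt_get_parallel_id(1); // parent
//   ompt_task_id_t     current_task     = ompt_get_task_id(0);     // current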


//******************************************************************************
// interface operations
//******************************************************************************

//----------------------------------------------------------
// thread support
//----------------------------------------------------------

ompt_thread_id_t
__ompt_thread_id_new()
{
    static uint64_t ompt_thread_id = 1;
    return NEXT_ID(&ompt_thread_id, 0);
}

void
__ompt_thread_begin(ompt_thread_type_t thread_type, int gtid)
{
    ompt_callbacks.ompt_callback(ompt_event_thread_begin)(
        thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}


void
__ompt_thread_end(ompt_thread_type_t thread_type, int gtid)
{
    ompt_callbacks.ompt_callback(ompt_event_thread_end)(
        thread_type, GTID_TO_OMPT_THREAD_ID(gtid));
}


ompt_thread_id_t
__ompt_get_thread_id_internal()
{
    // FIXME
    // until we have a better way of assigning ids, use __kmp_get_gtid
    // since the return value might be negative, we need to test that before
    // assigning it to an ompt_thread_id_t, which is unsigned.
    int id = __kmp_get_gtid();
    assert(id >= 0);

    return GTID_TO_OMPT_THREAD_ID(id);
}

//----------------------------------------------------------
// state support
//----------------------------------------------------------

void
__ompt_thread_assign_wait_id(void *variable)
{
    int gtid = __kmp_gtid_get_specific();
    kmp_info_t *ti = ompt_get_thread_gtid(gtid);

    ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t) variable;
}

ompt_state_t
__ompt_get_state_internal(ompt_wait_id_t *ompt_wait_id)
{
    kmp_info_t *ti = ompt_get_thread();

    if (ti) {
        if (ompt_wait_id)
            *ompt_wait_id = ti->th.ompt_thread_info.wait_id;
        return ti->th.ompt_thread_info.state;
    }
    return ompt_state_undefined;
}

//----------------------------------------------------------
// idle frame support
//----------------------------------------------------------

void *
__ompt_get_idle_frame_internal(void)
{
    kmp_info_t *ti = ompt_get_thread();
    return ti ? ti->th.ompt_thread_info.idle_frame : NULL;
}


//----------------------------------------------------------
// parallel region support
//----------------------------------------------------------

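// generate a fresh parallel region id; a negative gtid means the caller has no
// valid runtime thread id, so 0 (the value used throughout this file as
// "no id") is returned instead of consuming a new one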
ompt_parallel_id_t
__ompt_parallel_id_new(int gtid)
{
    static uint64_t ompt_parallel_id = 1;
    return gtid >= 0 ? NEXT_ID(&ompt_parallel_id, gtid) : 0;
}


void *
__ompt_get_parallel_function_internal(int depth)
{
    ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
    void *function = info ? info->microtask : NULL;
    return function;
}


ompt_parallel_id_t
__ompt_get_parallel_id_internal(int depth)
{
    ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL);
    ompt_parallel_id_t id = info ? info->parallel_id : 0;
    return id;
}


int
__ompt_get_parallel_team_size_internal(int depth)
{
    // initialize the return value with the error value.
    // if there is a team at the specified depth, the default
    // value will be overwritten with the size of that team.
    int size = -1;
    (void) __ompt_get_teaminfo(depth, &size);
    return size;
}
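// Illustrative use only (not a call site in the runtime): callers distinguish
// "no team at this depth" from a real team size by testing for the -1 error
// value, e.g.
//
//   int n = __ompt_get_parallel_team_size_internal(0);
//   if (n < 0) { /* no team known to OMPT at this depth */ }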


//----------------------------------------------------------
// lightweight task team support
//----------------------------------------------------------

void
__ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
                        int gtid, void *microtask,
                        ompt_parallel_id_t ompt_pid)
{
    lwt->ompt_team_info.parallel_id = ompt_pid;
    lwt->ompt_team_info.microtask = microtask;
    lwt->ompt_task_info.task_id = 0;
    lwt->ompt_task_info.frame.reenter_runtime_frame = NULL;
    lwt->ompt_task_info.frame.exit_runtime_frame = NULL;
    lwt->ompt_task_info.function = NULL;
    lwt->parent = 0;
}


void
__ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr)
{
    ompt_lw_taskteam_t *my_parent = thr->th.th_team->t.ompt_serialized_team_info;
    lwt->parent = my_parent;
    thr->th.th_team->t.ompt_serialized_team_info = lwt;
}


ompt_lw_taskteam_t *
__ompt_lw_taskteam_unlink(kmp_info_t *thr)
{
    ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
    if (lwtask) thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
    return lwtask;
}
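// A minimal pairing sketch (illustrative only; the real call sites live in the
// runtime's parallel-region code, e.g. around serialized parallel regions):
// init fills a stack-allocated record, link pushes it onto the team's chain of
// serialized team info, and unlink pops it when the region ends.
//
//   ompt_lw_taskteam_t lw_taskteam;
//   __ompt_lw_taskteam_init(&lw_taskteam, thr, gtid, microtask, ompt_pid);
//   __ompt_lw_taskteam_link(&lw_taskteam, thr);
//   ... // execute the serialized parallel region
//   __ompt_lw_taskteam_unlink(thr);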


//----------------------------------------------------------
// task support
//----------------------------------------------------------

ompt_task_id_t
__ompt_task_id_new(int gtid)
{
    static uint64_t ompt_task_id = 1;
    return NEXT_ID(&ompt_task_id, gtid);
}


ompt_task_id_t
__ompt_get_task_id_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    ompt_task_id_t task_id = info ? info->task_id : 0;
    return task_id;
}


void *
__ompt_get_task_function_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    void *function = info ? info->function : NULL;
    return function;
}


ompt_frame_t *
__ompt_get_task_frame_internal(int depth)
{
    ompt_task_info_t *info = __ompt_get_taskinfo(depth);
    ompt_frame_t *frame = info ? &info->frame : NULL;
    return frame;
}


//----------------------------------------------------------
// team support
//----------------------------------------------------------

void
__ompt_team_assign_id(kmp_team_t *team, ompt_parallel_id_t ompt_pid)
{
    team->t.ompt_team_info.parallel_id = ompt_pid;
}