/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const *cons_text_c[] = {
    "(none)", "\"parallel\"", "work-sharing", /* this is not called "for"
                                                 because of lowering of
                                                 "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"taskq\"", "\"taskq\"", "\"taskq ordered\"", "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"ordered\"", /* in TASKQ */
    "\"master\"", "\"reduce\"", "\"barrier\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

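/* Grow a thread's construct stack: the new capacity is twice the old one plus
   100 entries, and the existing entries are copied into the new allocation
   (the old array is intentionally not freed; see the NOTE below). */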
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
static char const *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  };
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }; // if
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

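/* Report a fatal runtime error naming the offending construct; the *2 variant
   below additionally names the conflicting construct already on the stack. */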
void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
                           ) {
  char const *construct = __kmp_pragma(ct, ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE((void *)construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
                            ) {
  char const *construct1 = __kmp_pragma(ct, ident);
  char const *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_msg(kmp_ms_fatal, __kmp_msg_format(id, construct1, construct2),
            __kmp_msg_null);
  KMP_INTERNAL_FREE((void *)construct1);
  KMP_INTERNAL_FREE((void *)construct2);
}

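/* Allocate and initialize the construct stack for one thread. The stack starts
   with MIN_STACK entries plus a sentinel slot 0 of type ct_none; p_top, w_top
   and s_top index the most recently pushed parallel, worksharing and sync
   entries respectively. */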
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }; // if
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

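/* Release a thread's construct stack and its entry array. */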
void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }; // if
    __kmp_free(p);
  }; // if
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }; // for i
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

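/* Push a ct_parallel entry for a new parallel region, growing the stack if
   necessary, and make it the current p_top. */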
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }; // if
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

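/* Verify that entering a worksharing construct is legal here: it must not be
   nested inside another worksharing or synchronization construct of the
   current parallel region (nested taskq is the one allowed exception). */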
void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }; // if
  if (p->w_top > p->p_top &&
      !(IS_CONS_TYPE_TASKQ(p->stack_data[p->w_top].type) &&
        IS_CONS_TYPE_TASKQ(ct))) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }; // if
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }; // if
}

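/* Validate and then record entry into a worksharing construct; the new stack
   entry becomes the current w_top. */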
void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

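/* Check whether entering a synchronization construct (ordered, critical,
   master, reduce) is legal at this point, e.g. "ordered" used outside a
   worksharing construct with an ordered clause, a "critical" nested inside a
   critical of the same name (self-deadlock), or master/reduce nested inside a
   worksharing construct. */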
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo ||
      ct == ct_ordered_in_taskq) {
    if (p->w_top <= p->p_top) {
/* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        if (p->stack_data[p->w_top].type == ct_taskq) {
          __kmp_error_construct2(kmp_i18n_msg_CnsNotInTaskConstruct, ct, ident,
                                 &p->stack_data[p->w_top]);
        } else {
          __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                                 &p->stack_data[p->w_top]);
        }
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo ||
            stack_type ==
                ct_ordered_in_taskq) && /* C doesn't allow named ordered;
                                           ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already holds the lock for this critical
                       section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already holds the lock for this critical
                       section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }; // if
  }; // if
}

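/* Validate and then push a synchronization construct; the new entry becomes
   the current s_top, and the lock pointer is stored in the entry's name field
   so that same-name "critical" nesting can be detected later. */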
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

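/* The pop routines below undo the corresponding push when a construct ends.
   Each checks that the entry on top of the stack really is the construct being
   closed, reporting CnsDetectedEnd or CnsExpectedEnd otherwise. */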
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below are two exceptions to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo) &&
       !(p->stack_data[tos].type == ct_task_ordered && ct == ct_task))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  };
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  };
  if (gtid < 0) {
    __kmp_check_null_func();
  };
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

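/* Verify that a barrier is legal here: an explicit barrier must not appear
   inside a worksharing or synchronization construct of the current parallel
   region. */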
void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}