/*
 * kmp_error.c -- KPTS functions for error checking at runtime
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define MIN_STACK       100

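/*
 * Human-readable construct names, indexed by enum cons_type, used when
 * formatting diagnostics (Fortran spellings first, then C/C++ spellings).
 */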
static char const * cons_text_fort[] = {
    "(none)",
    "PARALLEL",
    "work-sharing",             /* this is not called DO because of lowering of SECTIONS and WORKSHARE directives */
    "ORDERED work-sharing",     /* this is not called DO ORDERED because of lowering of SECTIONS directives */
    "SECTIONS",
    "work-sharing",             /* this is not called SINGLE because of lowering of SECTIONS and WORKSHARE directives */
    "TASKQ",
    "TASKQ",
    "TASKQ ORDERED",
    "CRITICAL",
    "ORDERED",                  /* in PARALLEL */
    "ORDERED",                  /* in PDO */
    "ORDERED",                  /* in TASKQ */
    "MASTER",
    "REDUCE",
    "BARRIER"
};

static char const * cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing",             /* this is not called "for" because of lowering of "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing",             /* this is not called "single" because of lowering of "sections" pragmas */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"",              /* in PARALLEL */
    "\"ordered\"",              /* in PDO */
    "\"ordered\"",              /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""
};

#define get_src( ident )   ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )                                  \
    "\tpopping off stack: %s (%s)\n",                 \
    cons_text_c[ (p)->stack_data[ tos ].type ],       \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c    ) / sizeof( char const * );

/* ------------------------------------------------------------------------ */
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */

static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

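/*
 * Grow a thread's construct stack: roughly double the capacity, copy the
 * live entries into the new storage, and release the old buffer.
 */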
static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if (gtid < 0)
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    /* free the old stack now that the live entries have been copied over */
    __kmp_free( d );
}


// NOTE: Function returns allocated memory, caller must free it!
static char const *
__kmp_pragma(
    int             ct,
    ident_t const * ident
) {
    char const * cons = NULL;  // Construct name.
    char *       file = NULL;  // File name.
    char *       func = NULL;  // Function (routine) name.
    char *       line = NULL;  // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t    prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    };
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource ); // Copy source to buffer.
        // Split string in buffer to file, func, and line.
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }; // if
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma

/* ------------------------------------------------------------------------ */
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */


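/*
 * Report a fatal diagnostic for one construct, or, in the two-argument
 * variant below, for a pair of conflicting constructs.
 */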
void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t   id,      // Message identifier.
    enum cons_type  ct,      // First construct type.
    ident_t const * ident,   // First construct ident.
    struct cons_data const * cons  // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}


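/*
 * Allocate and initialize a construct stack for one thread. Entry 0 is a
 * ct_none sentinel, so a p/w/s top index of 0 means "no such construct".
 */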
struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }; // if
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}

void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }; // if
        __kmp_free( p );
    }; // if
}


#if KMP_DEBUG
static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }; // for i
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}
#endif

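/*
 * Push a PARALLEL construct. Entries of each kind are chained through the
 * "prev" field, so p_top always names the innermost parallel entry.
 */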
void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

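/*
 * A worksharing construct may not be nested inside another worksharing or
 * synchronization construct of the same parallel region (taskq nested in
 * taskq is the one exception); verify that before pushing.
 */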
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    if ( p->w_top > p->p_top &&
        !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }; // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }; // if
}

void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

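/*
 * Verify nesting rules for synchronization constructs: "ordered" must bind
 * to an enclosing worksharing construct that has an ordered clause and may
 * not be nested in another "ordered" or "critical"; "critical" may not be
 * nested inside a "critical" of the same name; "master" and "reduce" may
 * not appear inside a worksharing construct of the same parallel region.
 */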
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&  /* C doesn't allow named ordered; ordered in ordered gets error */
                  p->stack_data[ index ].ident != NULL &&
                  (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) { /* this same thread already has lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) { /* this same thread already has lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

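/*
 * The pop routines check that the construct being closed matches the entry
 * on top of the stack before unlinking it.
 */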
void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered  && ct == ct_pdo  ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

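/*
 * A barrier is illegal inside a worksharing or synchronization construct of
 * the current parallel region.
 */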
void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if (p->s_top > p->p_top) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}

/* ------------------------------------------------------------------------ */


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */