/*
 * kmp_error.c -- KPTS functions for error checking at runtime
 * $Revision: 42951 $
 * $Date: 2014-01-21 14:41:41 -0600 (Tue, 21 Jan 2014) $
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define MIN_STACK 100

static char const * cons_text_fort[] = {
    "(none)",
    "PARALLEL",
    "work-sharing",             /* this is not called DO because of lowering of SECTIONS and WORKSHARE directives */
    "ORDERED work-sharing",     /* this is not called DO ORDERED because of lowering of SECTIONS directives */
    "SECTIONS",
    "work-sharing",             /* this is not called SINGLE because of lowering of SECTIONS and WORKSHARE directives */
    "TASKQ",
    "TASKQ",
    "TASKQ ORDERED",
    "CRITICAL",
    "ORDERED",                  /* in PARALLEL */
    "ORDERED",                  /* in PDO */
    "ORDERED",                  /* in TASKQ */
    "MASTER",
    "REDUCE",
    "BARRIER"
};

static char const * cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing",             /* this is not called "for" because of lowering of "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing",             /* this is not called "single" because of lowering of "sections" pragmas */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"",              /* in PARALLEL */
    "\"ordered\"",              /* in PDO */
    "\"ordered\"",              /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""
};

#define get_src( ident )   ( (ident) == NULL ? NULL : (ident)->psource )

/* NOTE: PUSH_MSG and POP_MSG expand to printf-style argument lists; POP_MSG
   relies on a local variable named "tos" being in scope at the expansion site. */
#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )   \
    "\tpopping off stack: %s (%s)\n", \
    cons_text_c[ (p)->stack_data[ tos ].type ], \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c )    / sizeof( char const * );
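
// Both tables above are indexed by enum cons_type, so their entries must stay
// in one-to-one correspondence with that enum (and with each other); if a
// construct type is added or reordered, the tables and the *_num counts must
// be updated together or the diagnostics will name the wrong construct.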

/* ------------------------------------------------------------------------ */
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */

static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if (gtid < 0)
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    /* Grow geometrically so that repeated expansion stays cheap. */
    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    __kmp_free( d ); /* release the old stack now that live entries are copied */
}

// NOTE: Function returns allocated memory, caller must free it!
static char const *
__kmp_pragma(
    enum cons_type   ct,
    ident_t const *  ident
) {
    char const * cons = NULL;  // Construct name.
    char * file = NULL;        // File name.
    char * func = NULL;        // Function (routine) name.
    char * line = NULL;        // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t     prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    };
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource ); // Copy source to buffer.
        // Split string in buffer to file, func, and line.
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }; // if
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma
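
// For illustration (an assumption based on the splitting order above, not a
// string taken from this file): a psource of the form
//
//     ";file.c;foo;42;0;;"
//
// would be parsed into file="file.c", func="foo", line="42"; the first
// __kmp_str_split call discards the leading empty field, and anything after
// the line field is ignored here.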

/* ------------------------------------------------------------------------ */
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */


void
__kmp_error_construct(
    kmp_i18n_id_t   id,     // Message identifier.
    enum cons_type  ct,     // Construct type.
    ident_t const * ident   // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t            id,     // Message identifier.
    enum cons_type           ct,     // First construct type.
    ident_t const *          ident,  // First construct ident.
    struct cons_data const * cons    // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}


struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }; // if
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}
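
// The consistency "stack" is a single array holding three interleaved chains:
// p_top, w_top and s_top index the most recent PARALLEL, worksharing and sync
// entry respectively, and each entry's prev field links back to the previous
// entry of the same kind. Comparing these tops (e.g. w_top > p_top) is how
// the checks below decide which constructs are open in the current region;
// index 0 is a permanent ct_none sentinel, so 0 always means "none open".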

void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }; // if
        __kmp_free( p );
    }; // if
}


static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }; // for i
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}

void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    if ( p->w_top > p->p_top &&
        !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }; // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }; // if
}

void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&     /* C doesn't allow named ordered; ordered in ordered gets error */
                 p->stack_data[ index ].ident != NULL &&
                 (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {    /* this same thread already owns the lock for this critical section */
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

void
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
    __kmp_check_sync( gtid, ct, ident, lck );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}
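
// Typical call pattern (a sketch of how callers elsewhere in the runtime use
// this pair, not code from this file): construct entry points push before the
// body runs and the matching exit points pop afterwards, gated by the
// __kmp_env_consistency_check flag, e.g. for a critical section:
//
//     if ( __kmp_env_consistency_check )
//         __kmp_push_sync( gtid, ct_critical, loc, lck );  // diagnoses bad nesting
//     /* ... acquire lock, run user code, release lock ... */
//     if ( __kmp_env_consistency_check )
//         __kmp_pop_sync( gtid, ct_critical, loc );
//
// The locking logic itself is omitted; this only illustrates the pairing.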

/* ------------------------------------------------------------------------ */

void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered  && ct == ct_pdo  ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if (p->s_top > p->p_top) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}

/* ------------------------------------------------------------------------ */


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */