/*
 * kmp_error.c -- KPTS functions for error checking at runtime
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_str.h"
#include "kmp_error.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define MIN_STACK       100

static char const * cons_text_fort[] = {
    "(none)",
    "PARALLEL",
    "work-sharing",            /* this is not called DO because of lowering of SECTIONS and WORKSHARE directives */
    "ORDERED work-sharing",    /* this is not called DO ORDERED because of lowering of SECTIONS directives */
    "SECTIONS",
    "work-sharing",            /* this is not called SINGLE because of lowering of SECTIONS and WORKSHARE directives */
    "TASKQ",
    "TASKQ",
    "TASKQ ORDERED",
    "CRITICAL",
    "ORDERED",                 /* in PARALLEL */
    "ORDERED",                 /* in PDO */
    "ORDERED",                 /* in TASKQ */
    "MASTER",
    "REDUCE",
    "BARRIER"
};

static char const * cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing",              /* this is not called "for" because of lowering of "sections" pragmas */
    "\"ordered\" work-sharing",  /* this is not called "for ordered" because of lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing",              /* this is not called "single" because of lowering of "sections" pragmas */
    "\"taskq\"",
    "\"taskq\"",
    "\"taskq ordered\"",
    "\"critical\"",
    "\"ordered\"",               /* in PARALLEL */
    "\"ordered\"",               /* in PDO */
    "\"ordered\"",               /* in TASKQ */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\""
};

#define get_src( ident )   ( (ident) == NULL ? NULL : (ident)->psource )

#define PUSH_MSG( ct, ident ) \
    "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) )
#define POP_MSG( p )                                  \
    "\tpopping off stack: %s (%s)\n",                 \
    cons_text_c[ (p)->stack_data[ tos ].type ],       \
    get_src( (p)->stack_data[ tos ].ident )

static int const cons_text_fort_num = sizeof( cons_text_fort ) / sizeof( char const * );
static int const cons_text_c_num    = sizeof( cons_text_c )    / sizeof( char const * );

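/* Note: both tables above are indexed by enum cons_type (declared in
   kmp_error.h), so their entries are assumed to stay in the same order as that
   enumeration; cons_text_fort holds the Fortran spellings of the constructs
   named in cons_text_c. */
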
/* ------------------------------------------------------------------------ */
/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */

static void
__kmp_check_null_func( void )
{
    /* nothing to do */
}

static void
__kmp_expand_cons_stack( int gtid, struct cons_header *p )
{
    int    i;
    struct cons_data *d;

    /* TODO for monitor perhaps? */
    if (gtid < 0)
        __kmp_check_null_func();

    KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) );

    d = p->stack_data;

    p->stack_size = (p->stack_size * 2) + 100;

    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) );

    for (i = p->stack_top; i >= 0; --i)
        p->stack_data[i] = d[i];

    /* The entries were copied by value above, so the old stack can be
       released; __kmp_free is the matching deallocator for __kmp_allocate. */
    __kmp_free( d );
}

// NOTE: Function returns allocated memory, caller must free it!
static char const *
__kmp_pragma(
    int             ct,
    ident_t const * ident
) {
    char const * cons = NULL;  // Construct name.
    char * file = NULL;        // File name.
    char * func = NULL;        // Function (routine) name.
    char * line = NULL;        // Line number.
    kmp_str_buf_t buffer;
    kmp_msg_t prgm;
    __kmp_str_buf_init( & buffer );
    if ( 0 < ct && ct < cons_text_c_num ) {  // Valid indexes are 0 .. cons_text_c_num - 1.
        cons = cons_text_c[ ct ];
    } else {
        KMP_DEBUG_ASSERT( 0 );
    };
    if ( ident != NULL && ident->psource != NULL ) {
        char * tail = NULL;
        __kmp_str_buf_print( & buffer, "%s", ident->psource ); // Copy source to buffer.
        // Split string in buffer to file, func, and line.
        tail = buffer.str;
        __kmp_str_split( tail, ';', NULL,   & tail );
        __kmp_str_split( tail, ';', & file, & tail );
        __kmp_str_split( tail, ';', & func, & tail );
        __kmp_str_split( tail, ';', & line, & tail );
    }; // if
    prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line );
    __kmp_str_buf_free( & buffer );
    return prgm.str;
} // __kmp_pragma

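// Illustrative note (an assumption inferred from the splitting above, not a
// normative format definition): a psource string such as ";file.c;foo;10;12;;"
// would yield file="file.c", func="foo", line="10" -- the first
// __kmp_str_split() call discards the empty leading field, and any fields
// after the line number are ignored.
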
/* ------------------------------------------------------------------------ */
/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */
/* ------------------------------------------------------------------------ */


void
__kmp_error_construct(
    kmp_i18n_id_t  id,     // Message identifier.
    enum cons_type ct,     // Construct type.
    ident_t const * ident  // Construct ident.
) {
    char const * construct = __kmp_pragma( ct, ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct );
}

void
__kmp_error_construct2(
    kmp_i18n_id_t  id,             // Message identifier.
    enum cons_type ct,             // First construct type.
    ident_t const * ident,         // First construct ident.
    struct cons_data const * cons  // Second construct.
) {
    char const * construct1 = __kmp_pragma( ct, ident );
    char const * construct2 = __kmp_pragma( cons->type, cons->ident );
    __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null );
    KMP_INTERNAL_FREE( (void *) construct1 );
    KMP_INTERNAL_FREE( (void *) construct2 );
}


struct cons_header *
__kmp_allocate_cons_stack( int gtid )
{
    struct cons_header *p;

    /* TODO for monitor perhaps? */
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    }; // if
    KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) );
    p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) );
    p->p_top = p->w_top = p->s_top = 0;
    p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) );
    p->stack_size = MIN_STACK;
    p->stack_top  = 0;
    p->stack_data[ 0 ].type  = ct_none;
    p->stack_data[ 0 ].prev  = 0;
    p->stack_data[ 0 ].ident = NULL;
    return p;
}

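/* Entry 0 of the freshly allocated stack is a sentinel: p_top, w_top, and
   s_top all start at 0, so a top index of 0 means "no construct of that kind
   is currently open". */
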
void
__kmp_free_cons_stack( void * ptr ) {
    struct cons_header * p = (struct cons_header *) ptr;
    if ( p != NULL ) {
        if ( p->stack_data != NULL ) {
            __kmp_free( p->stack_data );
            p->stack_data = NULL;
        }; // if
        __kmp_free( p );
    }; // if
}


static void
dump_cons_stack( int gtid, struct cons_header * p ) {
    int i;
    int tos = p->stack_top;
    kmp_str_buf_t buffer;
    __kmp_str_buf_init( & buffer );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid );
    __kmp_str_buf_print( & buffer, "     stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top );
    for ( i = tos; i > 0; i-- ) {
        struct cons_data * c = & ( p->stack_data[ i ] );
        __kmp_str_buf_print( & buffer, "        stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name );
    }; // for i
    __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid );
    __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" );
    __kmp_debug_printf( "%s", buffer.str );
    __kmp_str_buf_free( & buffer );
}

void
__kmp_push_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) );
    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct_parallel;
    p->stack_data[ tos ].prev  = p->p_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->p_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

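/* One array serves as three logical stacks: each entry's prev field records
   the previous entry of the same kind, while p_top, w_top, and s_top index the
   newest PARALLEL, worksharing, and synchronization entries respectively. A
   push saves the old top in prev; the matching pop restores it. */
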
void
__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]->th.th_cons );
    KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );

    if ( p->stack_top >= p->stack_size ) {
        __kmp_expand_cons_stack( gtid, p );
    }; // if
    if ( p->w_top > p->p_top &&
        !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) {
        // We are already in a WORKSHARE construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] );
    }; // if
    if ( p->s_top > p->p_top ) {
        // We are already in a SYNC construct for this PARALLEL region.
        __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] );
    }; // if
}

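// Illustrative example (not part of the runtime): in builds where consistency
// checking is enabled, the first check above rejects a worksharing construct
// nested directly inside another one of the same parallel region, e.g.:
//
//     #pragma omp parallel
//     #pragma omp single
//     {
//         #pragma omp for    // error: work-sharing nested inside work-sharing
//         for ( i = 0; i < n; i++ ) { /* ... */ }
//     }
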
void
__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    __kmp_check_workshare( gtid, ct, ident );
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->w_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = NULL;
    p->w_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) );

    if (p->stack_top >= p->stack_size)
        __kmp_expand_cons_stack( gtid, p );

    if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) {
        if (p->w_top <= p->p_top) {
            /* we are not in a worksharing construct */
            #ifdef BUILD_PARALLEL_ORDERED
                /* do not report error messages for PARALLEL ORDERED */
                KMP_ASSERT( ct == ct_ordered_in_parallel );
            #else
                __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident );
            #endif /* BUILD_PARALLEL_ORDERED */
        } else {
            /* inside a WORKSHARING construct for this PARALLEL region */
            if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) {
                if (p->stack_data[ p->w_top ].type == ct_taskq) {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNotInTaskConstruct,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                } else {
                    __kmp_error_construct2(
                        kmp_i18n_msg_CnsNoOrderedClause,
                        ct, ident,
                        & p->stack_data[ p->w_top ]
                    );
                }
            }
        }
        if (p->s_top > p->p_top && p->s_top > p->w_top) {
            /* inside a sync construct which is inside a worksharing construct */
            int index = p->s_top;
            enum cons_type stack_type;

            stack_type = p->stack_data[ index ].type;

            if (stack_type == ct_critical ||
                ( ( stack_type == ct_ordered_in_parallel ||
                    stack_type == ct_ordered_in_pdo      ||
                    stack_type == ct_ordered_in_taskq  ) &&   /* C doesn't allow named ordered; ordered in ordered gets error */
                  p->stack_data[ index ].ident != NULL &&
                  (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) {
                /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
                __kmp_error_construct2(
                    kmp_i18n_msg_CnsInvalidNesting,
                    ct, ident,
                    & p->stack_data[ index ]
                );
            }
        }
    } else if ( ct == ct_critical ) {
#if KMP_USE_DYNAMIC_LOCK
        if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) {  /* this thread already holds the lock for this critical section */
#else
        if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) {       /* this thread already holds the lock for this critical section */
#endif
            int index = p->s_top;
            struct cons_data cons = { NULL, ct_critical, 0, NULL };
            /* walk up construct stack and try to find critical with matching name */
            while ( index != 0 && p->stack_data[ index ].name != lck ) {
                index = p->stack_data[ index ].prev;
            }
            if ( index != 0 ) {
                /* found match on the stack (may not always because of interleaved critical for Fortran) */
                cons = p->stack_data[ index ];
            }
            /* we are in CRITICAL which is inside a CRITICAL construct of the same name */
            __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons );
        }
    } else if ( ct == ct_master || ct == ct_reduce ) {
        if (p->w_top > p->p_top) {
            /* inside a WORKSHARING construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->w_top ]
            );
        }
        if (ct == ct_reduce && p->s_top > p->p_top) {
            /* inside another SYNC construct for this PARALLEL region */
            __kmp_error_construct2(
                kmp_i18n_msg_CnsInvalidNesting,
                ct, ident,
                & p->stack_data[ p->s_top ]
            );
        }; // if
    }; // if
}

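// Illustrative example (not part of the runtime): the ct_critical branch above
// diagnoses the self-deadlock of re-entering a critical section whose lock the
// thread already owns, e.g.:
//
//     #pragma omp critical (xyz)
//     {
//         #pragma omp critical (xyz)   // error: same-name "critical" nesting
//         { /* ... */ }
//     }
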
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    KMP_ASSERT( gtid == __kmp_get_gtid() );
    KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) );
#if KMP_USE_DYNAMIC_LOCK
    __kmp_check_sync( gtid, ct, ident, lck, seq );
#else
    __kmp_check_sync( gtid, ct, ident, lck );
#endif
    KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) );
    tos = ++ p->stack_top;
    p->stack_data[ tos ].type  = ct;
    p->stack_data[ tos ].prev  = p->s_top;
    p->stack_data[ tos ].ident = ident;
    p->stack_data[ tos ].name  = lck;
    p->s_top = tos;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

void
__kmp_pop_parallel( int gtid, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->p_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident );
    }
    if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) {
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct_parallel, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->p_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

enum cons_type
__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;

    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->w_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    }

    if ( tos != p->w_top ||
         ( p->stack_data[ tos ].type != ct &&
           /* below are two exceptions to the rule that construct types must match */
           ! ( p->stack_data[ tos ].type == ct_pdo_ordered  && ct == ct_pdo  ) &&
           ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task )
         )
       ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    }
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->w_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
    return p->stack_data[ p->w_top ].type;
}

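/* The type returned above is that of the worksharing construct left at w_top
   after the pop (ct_none via the sentinel entry if there is none); presumably
   this lets the caller see what worksharing construct, if any, still encloses
   it. */
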
void
__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident )
{
    int tos;
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    tos = p->stack_top;
    KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) );
    if ( tos == 0 || p->s_top == 0 ) {
        __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident );
    };
    if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) {
        __kmp_check_null_func();
        __kmp_error_construct2(
            kmp_i18n_msg_CnsExpectedEnd,
            ct, ident,
            & p->stack_data[ tos ]
        );
    };
    if ( gtid < 0 ) {
        __kmp_check_null_func();
    };
    KE_TRACE( 100, ( POP_MSG( p ) ) );
    p->s_top = p->stack_data[ tos ].prev;
    p->stack_data[ tos ].type  = ct_none;
    p->stack_data[ tos ].ident = NULL;
    p->stack_top = tos - 1;
    KE_DUMP( 1000, dump_cons_stack( gtid, p ) );
}

/* ------------------------------------------------------------------------ */

void
__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident )
{
    struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons;
    KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) );
    if ( ident != 0 ) {
        __kmp_check_null_func();
    }
    if ( p->w_top > p->p_top ) {
        /* we are already in a WORKSHARING construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->w_top ]
        );
    }
    if ( p->s_top > p->p_top ) {
        /* we are already in a SYNC construct for this PARALLEL region */
        __kmp_error_construct2(
            kmp_i18n_msg_CnsInvalidNesting,
            ct, ident,
            & p->stack_data[ p->s_top ]
        );
    }
}

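// Illustrative example (not part of the runtime): with consistency checking
// enabled, the first check above flags a barrier nested inside a worksharing
// construct, e.g.:
//
//     #pragma omp parallel
//     #pragma omp for
//     for ( i = 0; i < n; i++ ) {
//         #pragma omp barrier    // error: "barrier" inside work-sharing
//     }
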
/* ------------------------------------------------------------------------ */


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */