/*
 * kmp_threadprivate.c -- OpenMP threadprivate support library
 * $Revision: 42618 $
 * $Date: 2013-08-27 09:15:45 -0500 (Tue, 27 Aug 2013) $
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_itt.h"
#include "kmp_i18n.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table __kmp_threadprivate_d_table;

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

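/*
 * Bookkeeping overview (summary of the code below): the runtime keeps one
 * global table, __kmp_threadprivate_d_table, whose shared_common nodes hold
 * per-variable metadata (size, constructors/destructors, init template), plus
 * one private_common table per thread (th.th_pri_common) that maps a
 * variable's global address to that thread's private copy. Both tables are
 * indexed with KMP_HASH( gbl_addr ).
 */
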
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}


/*
 * Create a template for the data initialized storage.
 * Either the template is NULL indicating zero fill,
 * or the template is a copy of the original data.
 */

static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t i;
    char *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
/*
    d->data = 0;  // AC: commented out because __kmp_allocate zeroes the memory
    d->next = 0;
*/
    d->size = pc_size;
    d->more = 1;

    p = (char*)pc_addr;

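    /* Scan the initializer image for a nonzero byte. If one is found, snapshot
       the whole image; otherwise leave d->data as NULL, which
       __kmp_copy_common_data() takes to mean "zero fill". For example, a
       threadprivate `int x = 0;` keeps a NULL template, while `int y = 5;`
       gets a 4-byte copy of its initial value. */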
    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            memcpy( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}

/*
 * Initialize the data area from the template.
 */

static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int i, offset;

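    /* Replay the template list: each node describes `more` consecutive copies
       of a `size`-byte chunk; a NULL data pointer means the chunk is zeroed,
       otherwise the saved bytes are copied in. */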
    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                memcpy( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for( gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
/*                    __kmp_root[ gtid ]-> r.r_uber_thread -> th.th_pri_common -> data[ q ] = 0;*/
            }
#endif /* KMP_DEBUG */

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common *d_tn;

            /* C++ destructors need to be called once per thread before exiting */
            /* don't call destructors for master thread though unless we used copy constructor */

            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}

/* Call all destructors for threadprivate data belonging to this thread */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            /* Cannot do this here since not all threads have destroyed their data */
            /* TCW_4(__kmp_init_common, FALSE); */

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */


/*
 * NOTE: this routine is to be called only from the serial part of the program.
 */

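/* Records the variable in the global table (if not already present) and
   snapshots its current value as the init template, so private copies made
   later by kmp_threadprivate_insert() start from the serial-region state. */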
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
            __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}

struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */

    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }
/* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id.                    */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
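/*
 * Illustrative sketch only (actual code generation varies by compiler): for a
 * threadprivate object of C++ type T,
 *
 *     T tp_obj;
 *     #pragma omp threadprivate(tp_obj)
 *
 * the compiler could emit a one-time registration such as
 *
 *     __kmpc_threadprivate_register( &loc, &tp_obj,
 *                                    (kmpc_ctor) T_default_ctor, // hypothetical wrapper
 *                                    NULL,                       // cctor must be NULL (see assert below)
 *                                    (kmpc_dtor) T_dtor );       // hypothetical wrapper
 */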
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}

void *
__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size)
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

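    /* While the program is still serial (and the caller is not a foreign
       thread), the global itself serves as this thread's copy; we only record
       the current value as the init template for threads created later. */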
    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will NEVER overlap with the data_address */
        /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the data_address; use data_address = data */

        KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                       global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            /* The parallel address will NEVER overlap with the data_address */
            /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use data_address = data */
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
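/*
 * Illustrative sketch only (assumed, not prescribed by this file): a compiler
 * can lower each threadprivate reference in a parallel region to
 *
 *     static void **tp_cache = NULL;   // hypothetical per-variable cache
 *     int *p = (int *) __kmpc_threadprivate_cached( &loc, gtid, &tp_var,
 *                                                   sizeof( tp_var ), &tp_cache );
 *
 * so that after the first call a thread's copy is a single indexed load from
 * the cache.
 */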
void *
__kmpc_threadprivate_cached(
    ident_t *  loc,
    kmp_int32  global_tid,   // gtid.
    void *     data,         // Pointer to original global variable.
    size_t     size,         // Size of original global variable.
    void ***   cache
) {
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                   KMP_SIZE_T_SPEC "\n",
                   global_tid, *cache, data, size ) );

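    /* Double-checked locking: test the cache pointer, take __kmp_global_lock,
       then test again so only one thread allocates the cache array. The array
       has one slot per possible gtid (__kmp_tp_capacity) plus a trailing
       kmp_cached_addr_t used to chain it onto __kmp_threadpriv_cache_list for
       later cleanup. */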
    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
            void ** my_cache;
            KMP_ITT_IGNORE(
            my_cache = (void**)
                __kmp_allocate(sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t ));
            );
            // No need to zero the allocated memory; __kmp_allocate does that.
            KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                           global_tid, my_cache ) );

            /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */
            /* Add address of mycache to linked list for cleanup later */
            kmp_cached_addr_t *tp_cache_addr;

            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    void *ret;
    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)
 Register vector constructors and destructors for thread private data.
*/
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );          /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
735}