/*
 * kmp_threadprivate.c -- OpenMP threadprivate support library
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_itt.h"
#include "kmp_i18n.h"

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1


/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table __kmp_threadprivate_d_table;
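
/*
 * Overview (a sketch derived from the code below, with my_tp_var standing in
 * for a hypothetical registered threadprivate variable): metadata lives at
 * two levels. The global table __kmp_threadprivate_d_table, indexed by
 * KMP_HASH of the variable's original address, holds one shared_common
 * descriptor per threadprivate variable (size, ctors/dtors, init template).
 * Each thread also keeps its own hash table, th.th_pri_common, mapping the
 * same address to a private_common node whose par_addr is that thread's
 * private copy:
 *
 *     struct shared_common *sc =                  // per-variable metadata
 *         __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
 *                                        gtid, &my_tp_var );
 *     struct private_common *pc =                 // this thread's copy
 *         __kmp_threadprivate_find_task_common(
 *             __kmp_threads[ gtid ]->th.th_pri_common, gtid, &my_tp_var );
 */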

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
    dump_list();
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}

static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}


/*
 * Create a template for the data initialized storage.
 * Either the template is NULL indicating zero fill,
 * or the template is a copy of the original data.
 */

static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t i;
    char *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
/*
    d->data = 0;  // AC: commented out because __kmp_allocate zeroes the memory
    d->next = 0;
*/
    d->size = pc_size;
    d->more = 1;

    p = (char*)pc_addr;

    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            KMP_MEMCPY( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}
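
/*
 * For illustration, assuming a hypothetical threadprivate variable
 *
 *     int counter = 42;
 *
 * its initial image contains a non-zero byte, so __kmp_init_common_data
 * returns a node whose d->data points at a heap copy of the image and
 * d->size == sizeof(counter). For an all-zero image (say "int zeroed[16];")
 * the byte scan finds nothing, d->data stays NULL, and
 * __kmp_copy_common_data below memsets the destination instead of copying:
 * the template doubles as a zero-fill marker.
 */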

/*
 * Initialize the data area from the template.
 */

static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                KMP_MEMCPY( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif

        __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for ( gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
/*                    __kmp_root[ gtid ]-> r.r_uber_thread -> th.th_pri_common -> data[ q ] = 0;*/
            }
#endif /* KMP_DEBUG */

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common *d_tn;

            /* C++ destructors need to be called once per thread before exiting */
            /* don't call destructors for master thread though unless we used copy constructor */

            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}

/* Call all destructors for threadprivate data belonging to this thread */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            /* Cannot do this here since not all threads have destroyed their data */
            /* TCW_4(__kmp_init_common, FALSE); */

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                           gtid ) );
        }
    }
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */

/*
 * NOTE: this routine is to be called only from the serial part of the program.
 */

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
                      __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
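
/*
 * A sketch of when the routine above is reached: it is the serial-part path
 * of __kmpc_threadprivate further below (root not active, no foreign
 * threadprivate). Only the shared descriptor and its POD init template are
 * created; no private copy is allocated, because in the serial part the
 * thread keeps using the original address (loc and my_tp_var hypothetical):
 *
 *     // outside any parallel region
 *     void *p = __kmpc_threadprivate( &loc, gtid, &my_tp_var, sizeof(my_tp_var) );
 *     // p == &my_tp_var; worker threads get distinct copies later
 */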

struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->ct.ctor = 0;
        d_tn->cct.cctor = 0;
        d_tn->dt.dtor = 0;
        d_tn->is_vec = FALSE;
        d_tn->vec_len = 0L;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
    dump_list( );
#endif

    /* Link the node into a simple list */

    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * if C++ object with copy constructor, use it;
     * else if C++ object with constructor, use it for the non-master copies only;
     * else use pod_init and memcpy
     *
     * C++ constructors need to be called once for each non-master thread on allocate
     * C++ copy constructors need to be called once for each thread on allocate
     */

    /*
     * C++ object with constructors/destructors;
     * don't call constructors for master thread though
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }
/* !BUILD_OPENMP_C
    if (tn->par_addr != tn->gbl_addr)
        __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

    return tn;
}
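
/*
 * Initialization precedence applied above to each non-master copy, restated
 * as a sketch (the non-vector branch; the vector branch is identical with
 * vec_len appended):
 *
 *     if (d_tn->ct.ctor != 0)
 *         (*d_tn->ct.ctor) (tn->par_addr);                   // default-construct
 *     else if (d_tn->cct.cctor != 0)
 *         (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init); // copy the prototype
 *     else if (tn->par_addr != tn->gbl_addr)
 *         __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
 *
 * The master (or the initial thread when __kmp_foreign_tp is set) returns
 * early with par_addr == gbl_addr, so none of these branches runs for it and
 * the original storage is reused untouched.
 */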

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
void
__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor)
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0 );
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;
/*
        d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->vec_len = 0L;
        d_tn->obj_init = 0;
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
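
/*
 * A hedged example of the expected call shape (loc, obj and the thunks are
 * hypothetical; real ones are compiler-generated). For a C++ threadprivate
 * object a compiler may emit, once per image:
 *
 *     __kmpc_threadprivate_register( &loc, &obj,
 *                                    (kmpc_ctor) obj_ctor,
 *                                    NULL,        // cctor: asserted zero above
 *                                    (kmpc_dtor) obj_dtor );
 *
 * Registering the same address twice is a no-op: the lookup above finds the
 * existing shared_common node and the function falls through.
 */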

void *
__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size)
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will NEVER overlap with the data_address */
        /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the data_address; use data_address = data */

        KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                       global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            /* The parallel address will NEVER overlap with the data_address */
            /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use data_address = data */
            KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
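
/*
 * Usage sketch (loc and tp_var hypothetical): inside a parallel region every
 * thread resolves its own copy through this one entry point:
 *
 *     int *mine = (int *) __kmpc_threadprivate( &loc, global_tid,
 *                                               &tp_var, sizeof(tp_var) );
 *
 * The first call from a given thread inserts and initializes the private
 * copy; subsequent calls find it in th.th_pri_common and simply return
 * par_addr.
 */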

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(
    ident_t *  loc,
    kmp_int32  global_tid,   // gtid.
    void *     data,         // Pointer to original global variable.
    size_t     size,         // Size of original global variable.
    void ***   cache
) {
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                   KMP_SIZE_T_SPEC "\n",
                   global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
            void ** my_cache;
            KMP_ITT_IGNORE(
                my_cache = (void**)
                    __kmp_allocate(sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t ));
            );
            // No need to zero the allocated memory; __kmp_allocate does that.
            KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                           global_tid, my_cache ) );

            /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */
            /* Add address of my_cache to linked list for cleanup later */
            kmp_cached_addr_t *tp_cache_addr;

            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache);

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    void *ret;
    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                   global_tid, ret ) );

    return ret;
}
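
/*
 * The cache is a lazily allocated per-variable array indexed by gtid, built
 * under double-checked locking. A sketch of how a compiler might use it
 * (tp_cache, loc and tp_var hypothetical):
 *
 *     static void **tp_cache;   // one cache pointer per threadprivate variable
 *     ...
 *     int *mine = (int *) __kmpc_threadprivate_cached( &loc, gtid, &tp_var,
 *                                                      sizeof(tp_var), &tp_cache );
 *
 * Once (*tp_cache)[gtid] is filled in, later calls return it without taking
 * __kmp_global_lock.
 */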

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)

 Register vector constructors and destructors for thread private data.
*/
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
    KMP_ASSERT( cctor == 0 );
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );          /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;
/*
        d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate zeroes the memory
        d_tn->pod_init = 0;
*/
        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
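
/*
 * A sketch of vector registration for an array variable (arr, loc and the
 * *_vec thunks are hypothetical; real ones are compiler-generated):
 *
 *     __kmpc_threadprivate_register_vec( &loc, arr,
 *                                        (kmpc_ctor_vec)  arr_ctor_vec,
 *                                        NULL,          // cctor: asserted zero above
 *                                        (kmpc_dtor_vec) arr_dtor_vec,
 *                                        vec_len );     // see bytes-vs-elements note above
 *
 * The registered vec_len is forwarded unchanged as the trailing argument of
 * every ctorv/cctorv/dtorv call, as in __kmp_common_destroy_gtid above.
 */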