/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

// Enable extra consistency checks on threadprivate common-block sizes.
#define USE_CHECKS_COMMON

// Force-inline the small hash-lookup helpers defined below.
#define KMP_INLINE_SUBR 1

// Forward declarations for the serial-path insertion entry points defined
// later in this file.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

// Global (master) hash table of threadprivate descriptors, keyed by the
// address of the original global variable.
struct shared_table __kmp_threadprivate_d_table;
Jim Cownie5e8470a2013-09-27 10:38:44 +000031
32static
33#ifdef KMP_INLINE_SUBR
Jonathan Peyton30419822017-05-12 18:01:32 +000034 __forceinline
Jim Cownie5e8470a2013-09-27 10:38:44 +000035#endif
Jonathan Peyton30419822017-05-12 18:01:32 +000036 struct private_common *
37 __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
38 void *pc_addr)
Jim Cownie5e8470a2013-09-27 10:38:44 +000039
40{
Jonathan Peyton30419822017-05-12 18:01:32 +000041 struct private_common *tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +000042
43#ifdef KMP_TASK_COMMON_DEBUG
Jonathan Peyton30419822017-05-12 18:01:32 +000044 KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
45 "address %p\n",
46 gtid, pc_addr));
47 dump_list();
Jim Cownie5e8470a2013-09-27 10:38:44 +000048#endif
49
Jonathan Peyton30419822017-05-12 18:01:32 +000050 for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
51 if (tn->gbl_addr == pc_addr) {
Jim Cownie5e8470a2013-09-27 10:38:44 +000052#ifdef KMP_TASK_COMMON_DEBUG
Jonathan Peyton30419822017-05-12 18:01:32 +000053 KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
54 "node %p on list\n",
55 gtid, pc_addr));
Jim Cownie5e8470a2013-09-27 10:38:44 +000056#endif
Jonathan Peyton30419822017-05-12 18:01:32 +000057 return tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +000058 }
Jonathan Peyton30419822017-05-12 18:01:32 +000059 }
60 return 0;
Jim Cownie5e8470a2013-09-27 10:38:44 +000061}
62
63static
64#ifdef KMP_INLINE_SUBR
Jonathan Peyton30419822017-05-12 18:01:32 +000065 __forceinline
Jim Cownie5e8470a2013-09-27 10:38:44 +000066#endif
Jonathan Peyton30419822017-05-12 18:01:32 +000067 struct shared_common *
68 __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
69 void *pc_addr) {
70 struct shared_common *tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +000071
Jonathan Peyton30419822017-05-12 18:01:32 +000072 for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
73 if (tn->gbl_addr == pc_addr) {
Jim Cownie5e8470a2013-09-27 10:38:44 +000074#ifdef KMP_TASK_COMMON_DEBUG
Jonathan Peyton30419822017-05-12 18:01:32 +000075 KC_TRACE(
76 10,
77 ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
78 gtid, pc_addr));
Jim Cownie5e8470a2013-09-27 10:38:44 +000079#endif
Jonathan Peyton30419822017-05-12 18:01:32 +000080 return tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +000081 }
Jonathan Peyton30419822017-05-12 18:01:32 +000082 }
83 return 0;
Jim Cownie5e8470a2013-09-27 10:38:44 +000084}
85
Jonathan Peyton30419822017-05-12 18:01:32 +000086// Create a template for the data initialized storage. Either the template is
87// NULL indicating zero fill, or the template is a copy of the original data.
88static struct private_data *__kmp_init_common_data(void *pc_addr,
89 size_t pc_size) {
90 struct private_data *d;
91 size_t i;
92 char *p;
Jim Cownie5e8470a2013-09-27 10:38:44 +000093
Jonathan Peyton30419822017-05-12 18:01:32 +000094 d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
95 /*
96 d->data = 0; // AC: commented out because __kmp_allocate zeroes the
97 memory
98 d->next = 0;
99 */
100 d->size = pc_size;
101 d->more = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000102
Jonathan Peyton30419822017-05-12 18:01:32 +0000103 p = (char *)pc_addr;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000104
Jonathan Peyton30419822017-05-12 18:01:32 +0000105 for (i = pc_size; i > 0; --i) {
106 if (*p++ != '\0') {
107 d->data = __kmp_allocate(pc_size);
108 KMP_MEMCPY(d->data, pc_addr, pc_size);
109 break;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000110 }
Jonathan Peyton30419822017-05-12 18:01:32 +0000111 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000112
Jonathan Peyton30419822017-05-12 18:01:32 +0000113 return d;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000114}
115
Jonathan Peyton30419822017-05-12 18:01:32 +0000116// Initialize the data area from the template.
117static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
118 char *addr = (char *)pc_addr;
119 int i, offset;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000120
Jonathan Peyton30419822017-05-12 18:01:32 +0000121 for (offset = 0; d != 0; d = d->next) {
122 for (i = d->more; i > 0; --i) {
123 if (d->data == 0)
124 memset(&addr[offset], '\0', d->size);
125 else
126 KMP_MEMCPY(&addr[offset], d->data, d->size);
127 offset += d->size;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000128 }
Jonathan Peyton30419822017-05-12 18:01:32 +0000129 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000130}
131
/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
// One-time setup of the threadprivate subsystem: resets the cache cleanup
// list, clears the global descriptor table, and marks the subsystem live.
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        // Every uber thread's private table must start out empty.
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
      }
#endif /* KMP_DEBUG */

    // Empty the global descriptor hash table.
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}
161
/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    // Walk every bucket of the global descriptor table.
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for master thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          // Vector (array) form: destructor takes (addr, vec_len).
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                // Skip the initial/uber thread; which one depends on
                // whether foreign threadprivate support is enabled.
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            // Also destroy the copy-constructed prototype, if any.
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          // Scalar form: destructor takes just the address.
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}
225
226/* Call all destructors for threadprivate data belonging to this thread */
Jonathan Peyton30419822017-05-12 18:01:32 +0000227void __kmp_common_destroy_gtid(int gtid) {
228 struct private_common *tn;
229 struct shared_common *d_tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000230
Jonathan Peyton30419822017-05-12 18:01:32 +0000231 KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
232 if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {
Jim Cownie5e8470a2013-09-27 10:38:44 +0000233
Jonathan Peyton30419822017-05-12 18:01:32 +0000234 if (TCR_4(__kmp_init_common)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +0000235
Jonathan Peyton30419822017-05-12 18:01:32 +0000236 /* Cannot do this here since not all threads have destroyed their data */
237 /* TCW_4(__kmp_init_common, FALSE); */
Jim Cownie5e8470a2013-09-27 10:38:44 +0000238
Jonathan Peyton30419822017-05-12 18:01:32 +0000239 for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {
Jim Cownie5e8470a2013-09-27 10:38:44 +0000240
Jonathan Peyton30419822017-05-12 18:01:32 +0000241 d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
242 tn->gbl_addr);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000243
Jonathan Peyton30419822017-05-12 18:01:32 +0000244 KMP_DEBUG_ASSERT(d_tn);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000245
Jonathan Peyton30419822017-05-12 18:01:32 +0000246 if (d_tn->is_vec) {
247 if (d_tn->dt.dtorv != 0) {
248 (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
249 }
250 if (d_tn->obj_init != 0) {
251 (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
252 }
253 } else {
254 if (d_tn->dt.dtor != 0) {
255 (void)(*d_tn->dt.dtor)(tn->par_addr);
256 }
257 if (d_tn->obj_init != 0) {
258 (void)(*d_tn->dt.dtor)(d_tn->obj_init);
259 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000260 }
Jonathan Peyton30419822017-05-12 18:01:32 +0000261 }
262 KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
263 "complete\n",
264 gtid));
Jim Cownie5e8470a2013-09-27 10:38:44 +0000265 }
Jonathan Peyton30419822017-05-12 18:01:32 +0000266 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000267}
268
#ifdef KMP_TASK_COMMON_DEBUG
// Debug helper: print every serial->parallel address mapping held by every
// live thread's private table.
static void dump_list(void) {
  for (int p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (int q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      struct private_common *tn = __kmp_threads[p]->th.th_pri_common->data[q];
      if (tn == 0)
        continue;

      KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

      for (; tn; tn = tn->next) {
        KC_TRACE(10,
                 ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                  tn->gbl_addr, tn->par_addr));
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */
293
// NOTE: this routine is to be called only from the serial part of the program.
// Registers a POD initialization template for the global at pc_addr in the
// master descriptor table, if one does not already exist. data_addr supplies
// the initial image; pc_size is the variable's size in bytes.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  // Must be serial: the calling thread exists and its root is not active.
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    // obj_init, ct/cct/dt, is_vec and vec_len remain zero:
    // __kmp_allocate returns zeroed memory.
    d_tn->cmn_size = pc_size;

    // Publish the descriptor under the global lock so concurrent readers of
    // the bucket list never see a half-linked node.
    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}
330
// Create this thread's private copy of the global variable at pc_addr and
// link it into the thread's private table and list. Allocates (or reuses)
// the shared descriptor, allocates the per-thread storage, and runs the
// appropriate constructor / copy constructor / POD initialization.
// Returns the new private_common node for the caller to use.
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    // First thread to instantiate it fills in the initialization recipe.
    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          // No constructors: fall back to a POD byte-image template.
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    // First time any thread sees this variable: create the descriptor.
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    // Remaining fields (obj_init, ct/cct/dt, is_vec, vec_len) stay zero:
    // __kmp_allocate returns zeroed memory.
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  // The master/initial thread uses the original global storage in place;
  // every other thread gets freshly allocated private storage.
  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
/* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  // A later sighting of the variable must not claim a larger size than the
  // descriptor recorded.
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  // Insert into this thread's private hash table (no lock needed: the table
  // is thread-local).
  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  // Master/initial thread keeps the original data as-is: no construction.
  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* if C++ object with copy constructor, use it;
   * else if C++ object with constructor, use it for the non-master copies
   * only; else use pod_init and memcpy
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate
   * C++ copy constructors need to be called once for each thread on allocate */

  /* C++ object with constructors/destructors; don't call constructors for
     master thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }

  return tn;
}
484
485/* ------------------------------------------------------------------------ */
486/* We are currently parallel, and we know the thread id. */
487/* ------------------------------------------------------------------------ */
488
489/*!
490 @ingroup THREADPRIVATE
491
Jonathan Peyton61118492016-05-20 19:03:38 +0000492 @param loc source location information
493 @param data pointer to data being privatized
494 @param ctor pointer to constructor function for data
495 @param cctor pointer to copy constructor function for data
496 @param dtor pointer to destructor function for data
Jim Cownie5e8470a2013-09-27 10:38:44 +0000497
498 Register constructors and destructors for thread private data.
499 This function is called when executing in parallel, when we know the thread id.
500*/
Jonathan Peyton30419822017-05-12 18:01:32 +0000501void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
502 kmpc_cctor cctor, kmpc_dtor dtor) {
503 struct shared_common *d_tn, **lnk_tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000504
Jonathan Peyton30419822017-05-12 18:01:32 +0000505 KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));
Jim Cownie5e8470a2013-09-27 10:38:44 +0000506
507#ifdef USE_CHECKS_COMMON
Jonathan Peyton30419822017-05-12 18:01:32 +0000508 /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
509 KMP_ASSERT(cctor == 0);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000510#endif /* USE_CHECKS_COMMON */
511
Jonathan Peyton30419822017-05-12 18:01:32 +0000512 /* Only the global data table exists. */
513 d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000514
Jonathan Peyton30419822017-05-12 18:01:32 +0000515 if (d_tn == 0) {
516 d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
517 d_tn->gbl_addr = data;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000518
Jonathan Peyton30419822017-05-12 18:01:32 +0000519 d_tn->ct.ctor = ctor;
520 d_tn->cct.cctor = cctor;
521 d_tn->dt.dtor = dtor;
522 /*
523 d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate
524 zeroes the memory
525 d_tn->vec_len = 0L;
526 d_tn->obj_init = 0;
527 d_tn->pod_init = 0;
528 */
529 lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000530
Jonathan Peyton30419822017-05-12 18:01:32 +0000531 d_tn->next = *lnk_tn;
532 *lnk_tn = d_tn;
533 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000534}
535
// Return the calling thread's private copy of the global variable at 'data',
// creating it on first use. On the serial path (root inactive, no foreign
// threadprivate) the global storage itself is used; otherwise a per-thread
// copy is found or inserted and its parallel address is returned.
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    // Serial path: the original global storage is used directly.
    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      // A repeat reference must not claim a larger size than recorded.
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}
591
/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  // Lazily create the per-variable cache array: check, take the global lock,
  // then re-check so only one thread allocates it.
  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      // Record (under its own bootstrap lock) that at least one cache exists.
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      // One slot per possible thread, plus a trailing kmp_cached_addr_t
      // bookkeeping record carved out of the same allocation.
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* TODO: free all this memory in __kmp_common_destroy using
       * __kmp_threadpriv_cache_list */
      /* Add address of mycache to linked list for cleanup later */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Fence before publishing so readers never see a partially
      // initialized cache.
      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  // Fast path: reuse the cached private address; slow path: resolve it via
  // __kmpc_threadprivate and memoize the result for this thread.
  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}
662
663/*!
664 @ingroup THREADPRIVATE
Jonathan Peyton61118492016-05-20 19:03:38 +0000665 @param loc source location information
666 @param data pointer to data being privatized
667 @param ctor pointer to constructor function for data
668 @param cctor pointer to copy constructor function for data
669 @param dtor pointer to destructor function for data
Jim Cownie5e8470a2013-09-27 10:38:44 +0000670 @param vector_length length of the vector (bytes or elements?)
671 Register vector constructors and destructors for thread private data.
672*/
Jonathan Peyton30419822017-05-12 18:01:32 +0000673void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
674 kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
675 kmpc_dtor_vec dtor,
676 size_t vector_length) {
677 struct shared_common *d_tn, **lnk_tn;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000678
Jonathan Peyton30419822017-05-12 18:01:32 +0000679 KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));
Jim Cownie5e8470a2013-09-27 10:38:44 +0000680
681#ifdef USE_CHECKS_COMMON
Jonathan Peyton30419822017-05-12 18:01:32 +0000682 /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
683 KMP_ASSERT(cctor == 0);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000684#endif /* USE_CHECKS_COMMON */
685
Jonathan Peyton30419822017-05-12 18:01:32 +0000686 d_tn = __kmp_find_shared_task_common(
687 &__kmp_threadprivate_d_table, -1,
688 data); /* Only the global data table exists. */
Jim Cownie5e8470a2013-09-27 10:38:44 +0000689
Jonathan Peyton30419822017-05-12 18:01:32 +0000690 if (d_tn == 0) {
691 d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
692 d_tn->gbl_addr = data;
Jim Cownie5e8470a2013-09-27 10:38:44 +0000693
Jonathan Peyton30419822017-05-12 18:01:32 +0000694 d_tn->ct.ctorv = ctor;
695 d_tn->cct.cctorv = cctor;
696 d_tn->dt.dtorv = dtor;
697 d_tn->is_vec = TRUE;
698 d_tn->vec_len = (size_t)vector_length;
699 /*
700 d_tn->obj_init = 0; // AC: commented out because __kmp_allocate
701 zeroes the memory
702 d_tn->pod_init = 0;
703 */
704 lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);
Jim Cownie5e8470a2013-09-27 10:38:44 +0000705
Jonathan Peyton30419822017-05-12 18:01:32 +0000706 d_tn->next = *lnk_tn;
707 *lnk_tn = d_tn;
708 }
Jim Cownie5e8470a2013-09-27 10:38:44 +0000709}