/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

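/* Threadprivate bookkeeping lives in two kinds of hash tables, both indexed
   by KMP_HASH of a variable's original ("global") address: the single global
   __kmp_threadprivate_d_table below keeps one shared_common node per
   registered variable (size, POD init template, ctor/cctor/dtor), while each
   thread's th.th_pri_common table keeps private_common nodes that map the
   same address to that thread's private copy (par_addr). */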
struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr)

{
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data initialized storage. Either the template is
// NULL indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
  d->data = 0; // AC: commented out because __kmp_allocate zeroes the memory
  d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
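
/* For example, a threadprivate variable whose initial image is all zero bytes
   gets a template with data == NULL, so __kmp_copy_common_data below simply
   zero-fills each thread's copy; any nonzero byte in the initial image makes
   the template keep a full copy of the original data instead. */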

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  int i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}

/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[ gtid ]-> r.r_uber_thread ->
         * th.th_pri_common -> data[ q ] = 0;*/
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for master thread though unless we used copy
         constructor */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

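/* Register only the shared (global-table) bookkeeping for a variable that the
   initial thread touches while still serial: the shared_common node and its
   POD init template are created so that worker threads can later initialize
   their private copies from it, while the calling thread keeps using the
   original storage. */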
// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                         // zeroes the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

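/* Create this thread's node for one threadprivate variable: make sure a
   shared_common entry exists (creating and registering one under
   __kmp_global_lock if needed), allocate the private copy unless this is the
   initial/uber thread (which reuses the original storage), initialize it via
   the constructor, copy constructor, or POD template as appropriate, and link
   the node into both the thread's th_pri_common hash table and its
   th_pri_head list. */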
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                         // zeroes the memory
    d_tn->ct.ctor = 0;
    d_tn->cct.cctor = 0;
    d_tn->dt.dtor = 0;
    d_tn->is_vec = FALSE;
    d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
/* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

#ifdef BUILD_TV
  __kmp_tv_threadprivate_store(__kmp_threads[gtid], tn->gbl_addr, tn->par_addr);
#endif

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* if C++ object with copy constructor, use it;
   * else if C++ object with constructor, use it for the non-master copies only;
   * else use pod_init and memcpy
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate
   * C++ copy constructors need to be called once for each thread on allocate */

  /* C++ object with constructors/destructors; don't call constructors for
     master thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
  if (tn->par_addr != tn->gbl_addr)
    __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
*/
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
    d_tn->is_vec = FALSE;  // AC: commented out because __kmp_allocate
                           // zeroes the memory
    d_tn->vec_len = 0L;
    d_tn->obj_init = 0;
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

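/* For illustration only (hypothetical names, not actual compiler output): a
   C++ threadprivate object with nontrivial construction and destruction is
   typically registered once with a call of roughly this shape; note that the
   copy constructor argument must be NULL for the current code generation
   (enforced by the KMP_ASSERT above).

     static Widget tp_widget; // #pragma omp threadprivate(tp_widget)
     __kmpc_threadprivate_register(&loc, &tp_widget,
                                   (kmpc_ctor)widget_ctor_thunk,
                                   (kmpc_cctor)NULL,
                                   (kmpc_dtor)widget_dtor_thunk);
*/

/* __kmpc_threadprivate returns the calling thread's private copy of the
   threadprivate variable whose original storage is at 'data'. When the root
   is not active (serial part of the program) and foreign-thread
   threadprivate support is off, it only registers the initialization
   template and hands back the original storage; otherwise it looks the
   variable up in this thread's th_pri_common table and, on a miss, creates
   the private copy via kmp_threadprivate_insert. */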
void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      __kmp_tp_cached = 1;
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
      void **my_cache;
      KMP_ITT_IGNORE(
          my_cache = (void **)__kmp_allocate(
              sizeof(void *) * __kmp_tp_capacity + sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(
          50,
          ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
           global_tid, my_cache));

      /* TODO: free all this memory in __kmp_common_destroy using
       * __kmp_threadpriv_cache_list */
      /* Add address of mycache to linked list for cleanup later */
      kmp_cached_addr_t *tp_cache_addr;

      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      KMP_MB();

      TCW_PTR(*cache, my_cache);

      KMP_MB();
    }

    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));

  return ret;
}
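
/* For illustration only (hypothetical names, not actual compiler output):
   each threadprivate variable typically gets one global cache pointer,
   zero-initialized at program start, and every access to the variable goes
   through the cached entry point:

     static int tp_counter; // #pragma omp threadprivate(tp_counter)
     static void **tp_counter_cache; // per-variable cache, initially NULL
     ...
     int *my_copy = (int *)__kmpc_threadprivate_cached(
         &loc, __kmpc_global_thread_num(&loc), &tp_counter,
         sizeof(tp_counter), &tp_counter_cache);
     *my_copy += 1; // operates on this thread's private copy
*/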

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)

 Register vector constructors and destructors for thread private data.
*/
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    /*
    d_tn->obj_init = 0;  // AC: commented out because __kmp_allocate
                         // zeroes the memory
    d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}