/*
 * kmp_threadprivate.cpp -- OpenMP threadprivate support library
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"

#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);

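// Global table of threadprivate descriptors for the serial (global) copies,
// keyed by the address of the original variable and hashed with KMP_HASH().
// Each thread additionally keeps its own per-thread table in th.th_pri_common.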
struct shared_table __kmp_threadprivate_d_table;

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct private_common *
    __kmp_threadprivate_find_task_common(struct common_table *tbl, int gtid,
                                         void *pc_addr) {
  struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, called with "
                "address %p\n",
                gtid, pc_addr));
  dump_list();
#endif

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(10, ("__kmp_threadprivate_find_task_common: thread#%d, found "
                    "node %p on list\n",
                    gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

static
#ifdef KMP_INLINE_SUBR
    __forceinline
#endif
    struct shared_common *
    __kmp_find_shared_task_common(struct shared_table *tbl, int gtid,
                                  void *pc_addr) {
  struct shared_common *tn;

  for (tn = tbl->data[KMP_HASH(pc_addr)]; tn; tn = tn->next) {
    if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
      KC_TRACE(
          10,
          ("__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
           gtid, pc_addr));
#endif
      return tn;
    }
  }
  return 0;
}

// Create a template for the data initialized storage. Either the template is
// NULL, indicating zero fill, or the template is a copy of the original data.
static struct private_data *__kmp_init_common_data(void *pc_addr,
                                                   size_t pc_size) {
  struct private_data *d;
  size_t i;
  char *p;

  d = (struct private_data *)__kmp_allocate(sizeof(struct private_data));
  /*
     d->data = 0; // AC: commented out because __kmp_allocate zeroes the memory
     d->next = 0;
  */
  d->size = pc_size;
  d->more = 1;

  p = (char *)pc_addr;

  for (i = pc_size; i > 0; --i) {
    if (*p++ != '\0') {
      d->data = __kmp_allocate(pc_size);
      KMP_MEMCPY(d->data, pc_addr, pc_size);
      break;
    }
  }

  return d;
}
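
// For example (a sketch of the intent, not code from this library): if the
// serial copy of a threadprivate variable is all zero bytes at registration
// time, d->data stays NULL and each private copy is later zero-filled with
// memset; for a variable such as
//   int x = 42; // marked threadprivate
// the initializer bytes are copied into d->data once, then replayed into each
// thread's private copy by __kmp_copy_common_data below.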

// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
  char *addr = (char *)pc_addr;
  int i, offset;

  for (offset = 0; d != 0; d = d->next) {
    for (i = d->more; i > 0; --i) {
      if (d->data == 0)
        memset(&addr[offset], '\0', d->size);
      else
        KMP_MEMCPY(&addr[offset], d->data, d->size);
      offset += d->size;
    }
  }
}

/* We are called from __kmp_serial_initialize() with __kmp_initz_lock held. */
void __kmp_common_initialize(void) {
  if (!TCR_4(__kmp_init_common)) {
    int q;
#ifdef KMP_DEBUG
    int gtid;
#endif

    __kmp_threadpriv_cache_list = NULL;

#ifdef KMP_DEBUG
    /* verify the uber masters were initialized */
    for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
      if (__kmp_root[gtid]) {
        KMP_DEBUG_ASSERT(__kmp_root[gtid]->r.r_uber_thread);
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
          KMP_DEBUG_ASSERT(
              !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q]);
        /* __kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] = 0; */
      }
#endif /* KMP_DEBUG */

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
      __kmp_threadprivate_d_table.data[q] = 0;

    TCW_4(__kmp_init_common, TRUE);
  }
}

/* Call all destructors for threadprivate data belonging to all threads.
   Currently unused! */
void __kmp_common_destroy(void) {
  if (TCR_4(__kmp_init_common)) {
    int q;

    TCW_4(__kmp_init_common, FALSE);

    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      int gtid;
      struct private_common *tn;
      struct shared_common *d_tn;

      /* C++ destructors need to be called once per thread before exiting.
         Don't call destructors for the master thread though, unless we used
         the copy constructor. */

      for (d_tn = __kmp_threadprivate_d_table.data[q]; d_tn;
           d_tn = d_tn->next) {
        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
            }
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
              if (__kmp_threads[gtid]) {
                if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid))
                                       : (!KMP_UBER_GTID(gtid))) {
                  tn = __kmp_threadprivate_find_task_common(
                      __kmp_threads[gtid]->th.th_pri_common, gtid,
                      d_tn->gbl_addr);
                  if (tn) {
                    (*d_tn->dt.dtor)(tn->par_addr);
                  }
                }
              }
            }
            if (d_tn->obj_init != 0) {
              (*d_tn->dt.dtor)(d_tn->obj_init);
            }
          }
        }
      }
      __kmp_threadprivate_d_table.data[q] = 0;
    }
  }
}

/* Call all destructors for threadprivate data belonging to this thread */
void __kmp_common_destroy_gtid(int gtid) {
  struct private_common *tn;
  struct shared_common *d_tn;

  if (!TCR_4(__kmp_init_gtid)) {
    // This is possible when one of multiple roots initiates early library
    // termination in a sequential region while other teams are active, and its
    // child threads are about to end.
    return;
  }

  KC_TRACE(10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid));
  if ((__kmp_foreign_tp) ? (!KMP_INITIAL_GTID(gtid)) : (!KMP_UBER_GTID(gtid))) {

    if (TCR_4(__kmp_init_common)) {

      /* Cannot do this here since not all threads have destroyed their data */
      /* TCW_4(__kmp_init_common, FALSE); */

      for (tn = __kmp_threads[gtid]->th.th_pri_head; tn; tn = tn->link) {

        d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                             tn->gbl_addr);

        KMP_DEBUG_ASSERT(d_tn);

        if (d_tn->is_vec) {
          if (d_tn->dt.dtorv != 0) {
            (void)(*d_tn->dt.dtorv)(tn->par_addr, d_tn->vec_len);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtorv)(d_tn->obj_init, d_tn->vec_len);
          }
        } else {
          if (d_tn->dt.dtor != 0) {
            (void)(*d_tn->dt.dtor)(tn->par_addr);
          }
          if (d_tn->obj_init != 0) {
            (void)(*d_tn->dt.dtor)(d_tn->obj_init);
          }
        }
      }
      KC_TRACE(30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors "
                    "complete\n",
                    gtid));
    }
  }
}

#ifdef KMP_TASK_COMMON_DEBUG
static void dump_list(void) {
  int p, q;

  for (p = 0; p < __kmp_all_nth; ++p) {
    if (!__kmp_threads[p])
      continue;
    for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
      if (__kmp_threads[p]->th.th_pri_common->data[q]) {
        struct private_common *tn;

        KC_TRACE(10, ("\tdump_list: gtid:%d addresses\n", p));

        for (tn = __kmp_threads[p]->th.th_pri_common->data[q]; tn;
             tn = tn->next) {
          KC_TRACE(10,
                   ("\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                    tn->gbl_addr, tn->par_addr));
        }
      }
    }
  }
}
#endif /* KMP_TASK_COMMON_DEBUG */

// NOTE: this routine is to be called only from the serial part of the program.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size) {
  struct shared_common **lnk_tn, *d_tn;
  KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
                   __kmp_threads[gtid]->th.th_root->r.r_active == 0);

  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, gtid,
                                       pc_addr);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));

    d_tn->gbl_addr = pc_addr;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
       d_tn->obj_init = 0; // AC: commented out because __kmp_allocate
       zeroes the memory
       d_tn->ct.ctor = 0;
       d_tn->cct.cctor = 0;
       d_tn->dt.dtor = 0;
       d_tn->is_vec = FALSE;
       d_tn->vec_len = 0L;
    */
    d_tn->cmn_size = pc_size;

    __kmp_acquire_lock(&__kmp_global_lock, gtid);

    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;

    __kmp_release_lock(&__kmp_global_lock, gtid);
  }
}

struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size) {
  struct private_common *tn, **tt;
  struct shared_common *d_tn;

  /* +++++++++ START OF CRITICAL SECTION +++++++++ */
  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  tn = (struct private_common *)__kmp_allocate(sizeof(struct private_common));

  tn->gbl_addr = pc_addr;

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, gtid,
      pc_addr); /* Only the MASTER data table exists. */

  if (d_tn != 0) {
    /* This threadprivate variable has already been seen. */

    if (d_tn->pod_init == 0 && d_tn->obj_init == 0) {
      d_tn->cmn_size = pc_size;

      if (d_tn->is_vec) {
        if (d_tn->ct.ctorv != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctorv != 0) {
          /* Now data initialize the prototype since it was previously
           * registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctorv)(d_tn->obj_init, pc_addr, d_tn->vec_len);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      } else {
        if (d_tn->ct.ctor != 0) {
          /* Construct from scratch so no prototype exists */
          d_tn->obj_init = 0;
        } else if (d_tn->cct.cctor != 0) {
          /* Now data initialize the prototype since it was previously
             registered */
          d_tn->obj_init = (void *)__kmp_allocate(d_tn->cmn_size);
          (void)(*d_tn->cct.cctor)(d_tn->obj_init, pc_addr);
        } else {
          d_tn->pod_init = __kmp_init_common_data(data_addr, d_tn->cmn_size);
        }
      }
    }
  } else {
    struct shared_common **lnk_tn;

    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = pc_addr;
    d_tn->cmn_size = pc_size;
    d_tn->pod_init = __kmp_init_common_data(data_addr, pc_size);
    /*
       d_tn->obj_init = 0; // AC: commented out because __kmp_allocate
       zeroes the memory
       d_tn->ct.ctor = 0;
       d_tn->cct.cctor = 0;
       d_tn->dt.dtor = 0;
       d_tn->is_vec = FALSE;
       d_tn->vec_len = 0L;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(pc_addr)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }

  tn->cmn_size = d_tn->cmn_size;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid))) {
    tn->par_addr = (void *)pc_addr;
  } else {
    tn->par_addr = (void *)__kmp_allocate(tn->cmn_size);
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
/* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
  if (pc_size > d_tn->cmn_size) {
    KC_TRACE(
        10, ("__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
             " ,%" KMP_UINTPTR_SPEC ")\n",
             pc_addr, pc_size, d_tn->cmn_size));
    KMP_FATAL(TPCommonBlocksInconsist);
  }
#endif /* USE_CHECKS_COMMON */

  tt = &(__kmp_threads[gtid]->th.th_pri_common->data[KMP_HASH(pc_addr)]);

#ifdef KMP_TASK_COMMON_DEBUG
  if (*tt != 0) {
    KC_TRACE(
        10,
        ("__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
         gtid, pc_addr));
  }
#endif
  tn->next = *tt;
  *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
  KC_TRACE(10,
           ("__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
            gtid, pc_addr));
  dump_list();
#endif

  /* Link the node into a simple list */

  tn->link = __kmp_threads[gtid]->th.th_pri_head;
  __kmp_threads[gtid]->th.th_pri_head = tn;

  if ((__kmp_foreign_tp) ? (KMP_INITIAL_GTID(gtid)) : (KMP_UBER_GTID(gtid)))
    return tn;

  /* If this is a C++ object with a copy constructor, use it;
   * else if it is a C++ object with a constructor, use it for the non-master
   * copies only;
   * else use pod_init and memcpy.
   *
   * C++ constructors need to be called once for each non-master thread on
   * allocate.
   * C++ copy constructors need to be called once for each thread on
   * allocate. */

  /* C++ object with constructors/destructors; don't call constructors for
     the master thread though */
  if (d_tn->is_vec) {
    if (d_tn->ct.ctorv != 0) {
      (void)(*d_tn->ct.ctorv)(tn->par_addr, d_tn->vec_len);
    } else if (d_tn->cct.cctorv != 0) {
      (void)(*d_tn->cct.cctorv)(tn->par_addr, d_tn->obj_init, d_tn->vec_len);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  } else {
    if (d_tn->ct.ctor != 0) {
      (void)(*d_tn->ct.ctor)(tn->par_addr);
    } else if (d_tn->cct.cctor != 0) {
      (void)(*d_tn->cct.cctor)(tn->par_addr, d_tn->obj_init);
    } else if (tn->par_addr != tn->gbl_addr) {
      __kmp_copy_common_data(tn->par_addr, d_tn->pod_init);
    }
  }
  /* !BUILD_OPENMP_C
     if (tn->par_addr != tn->gbl_addr)
       __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */

  return tn;
}

/* ------------------------------------------------------------------------ */
/* We are currently parallel, and we know the thread id. */
/* ------------------------------------------------------------------------ */

/*!
 @ingroup THREADPRIVATE

 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data

 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread
 id.
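
 For example, for a C++ threadprivate object the compiler emits a registration
 call of roughly this shape (a sketch only; `x`, `x_ctor` and `x_dtor` are
 hypothetical compiler-generated names, and the copy-constructor slot is NULL,
 matching the USE_CHECKS_COMMON assertion in the body):
 @code
 __kmpc_threadprivate_register(&loc, &x, x_ctor, NULL, x_dtor);
 @endcode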
*/
void __kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor,
                                   kmpc_cctor cctor, kmpc_dtor dtor) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  /* Only the global data table exists. */
  d_tn = __kmp_find_shared_task_common(&__kmp_threadprivate_d_table, -1, data);

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctor = ctor;
    d_tn->cct.cctor = cctor;
    d_tn->dt.dtor = dtor;
    /*
       d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate
       zeroes the memory
       d_tn->vec_len = 0L;
       d_tn->obj_init = 0;
       d_tn->pod_init = 0;
    */
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

void *__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data,
                           size_t size) {
  void *ret;
  struct private_common *tn;

  KC_TRACE(10, ("__kmpc_threadprivate: T#%d called\n", global_tid));

#ifdef USE_CHECKS_COMMON
  if (!__kmp_init_serial)
    KMP_FATAL(RTLNotInitialized);
#endif /* USE_CHECKS_COMMON */

  if (!__kmp_threads[global_tid]->th.th_root->r.r_active && !__kmp_foreign_tp) {
    /* The parallel address will NEVER overlap with the data_address */
    /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the
     * data_address; use data_address = data */

    KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting private data\n",
                  global_tid));
    kmp_threadprivate_insert_private_data(global_tid, data, data, size);

    ret = data;
  } else {
    KC_TRACE(
        50,
        ("__kmpc_threadprivate: T#%d try to find private data at address %p\n",
         global_tid, data));
    tn = __kmp_threadprivate_find_task_common(
        __kmp_threads[global_tid]->th.th_pri_common, global_tid, data);

    if (tn) {
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d found data\n", global_tid));
#ifdef USE_CHECKS_COMMON
      if ((size_t)size > tn->cmn_size) {
        KC_TRACE(10, ("THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                      " ,%" KMP_UINTPTR_SPEC ")\n",
                      data, size, tn->cmn_size));
        KMP_FATAL(TPCommonBlocksInconsist);
      }
#endif /* USE_CHECKS_COMMON */
    } else {
      /* The parallel address will NEVER overlap with the data_address */
      /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use
       * data_address = data */
      KC_TRACE(20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid));
      tn = kmp_threadprivate_insert(global_tid, data, data, size);
    }

    ret = tn->par_addr;
  }
  KC_TRACE(10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                global_tid, ret));

  return ret;
}

static kmp_cached_addr_t *__kmp_find_cache(void *data) {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;
  while (ptr && ptr->data != data)
    ptr = ptr->next;
  return ptr;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param global_tid global thread number
 @param data pointer to data to privatize
 @param size size of data to privatize
 @param cache pointer to cache
 @return pointer to private storage

 Allocate private storage for threadprivate data.
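
 For example, each access to a threadprivate variable `x` inside a parallel
 region can be lowered to something like the following sketch (illustrative
 names, not actual compiler output):
 @code
 static void **x_cache = NULL; // one cache per threadprivate variable
 int *my_x = (int *)__kmpc_threadprivate_cached(&loc, gtid, &x, sizeof(x),
                                                &x_cache);
 @endcode
 After the first call populates the cache, later lookups reduce to reading the
 per-thread slot x_cache[gtid].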
*/
void *
__kmpc_threadprivate_cached(ident_t *loc,
                            kmp_int32 global_tid, // gtid.
                            void *data, // Pointer to original global variable.
                            size_t size, // Size of original global variable.
                            void ***cache) {
  KC_TRACE(10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, "
                "address: %p, size: %" KMP_SIZE_T_SPEC "\n",
                global_tid, *cache, data, size));

  if (TCR_PTR(*cache) == 0) {
    __kmp_acquire_lock(&__kmp_global_lock, global_tid);

    if (TCR_PTR(*cache) == 0) {
      __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
      // Compiler often passes in NULL cache, even if it's already been created
      void **my_cache;
      kmp_cached_addr_t *tp_cache_addr;
      // Look for an existing cache
      tp_cache_addr = __kmp_find_cache(data);
      if (!tp_cache_addr) { // Cache was never created; do it now
        __kmp_tp_cached = 1;
        KMP_ITT_IGNORE(my_cache = (void **)__kmp_allocate(
                           sizeof(void *) * __kmp_tp_capacity +
                           sizeof(kmp_cached_addr_t)););
        // No need to zero the allocated memory; __kmp_allocate does that.
        KC_TRACE(50, ("__kmpc_threadprivate_cached: T#%d allocated cache at "
                      "address %p\n",
                      global_tid, my_cache));
        /* TODO: free all this memory in __kmp_common_destroy using
         * __kmp_threadpriv_cache_list */
        /* Add address of my_cache to linked list for cleanup later */
        tp_cache_addr = (kmp_cached_addr_t *)&my_cache[__kmp_tp_capacity];
        tp_cache_addr->addr = my_cache;
        tp_cache_addr->data = data;
        tp_cache_addr->compiler_cache = cache;
        tp_cache_addr->next = __kmp_threadpriv_cache_list;
        __kmp_threadpriv_cache_list = tp_cache_addr;
      } else { // A cache was already created; use it
        my_cache = tp_cache_addr->addr;
        tp_cache_addr->compiler_cache = cache;
      }
      KMP_MB();

      TCW_PTR(*cache, my_cache);
      __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

      KMP_MB();
    }
    __kmp_release_lock(&__kmp_global_lock, global_tid);
  }

  void *ret;
  if ((ret = TCR_PTR((*cache)[global_tid])) == 0) {
    ret = __kmpc_threadprivate(loc, global_tid, data, (size_t)size);

    TCW_PTR((*cache)[global_tid], ret);
  }
  KC_TRACE(10,
           ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
            global_tid, ret));
  return ret;
}

// This function should only be called when both __kmp_tp_cached_lock and
// kmp_forkjoin_lock are held.
void __kmp_threadprivate_resize_cache(int newCapacity) {
  KC_TRACE(10, ("__kmp_threadprivate_resize_cache: called with size: %d\n",
                newCapacity));

  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    if (ptr->data) { // this location has an active cache; resize it
      void **my_cache;
      KMP_ITT_IGNORE(my_cache =
                         (void **)__kmp_allocate(sizeof(void *) * newCapacity +
                                                 sizeof(kmp_cached_addr_t)););
      // No need to zero the allocated memory; __kmp_allocate does that.
      KC_TRACE(50, ("__kmp_threadprivate_resize_cache: allocated cache at %p\n",
                    my_cache));
      // Now copy old cache into new cache
      void **old_cache = ptr->addr;
      for (int i = 0; i < __kmp_tp_capacity; ++i) {
        my_cache[i] = old_cache[i];
      }

      // Add address of new my_cache to linked list for cleanup later
      kmp_cached_addr_t *tp_cache_addr;
      tp_cache_addr = (kmp_cached_addr_t *)&my_cache[newCapacity];
      tp_cache_addr->addr = my_cache;
      tp_cache_addr->data = ptr->data;
      tp_cache_addr->compiler_cache = ptr->compiler_cache;
      tp_cache_addr->next = __kmp_threadpriv_cache_list;
      __kmp_threadpriv_cache_list = tp_cache_addr;

      // Copy the new cache to the compiler's location: we could copy directly
      // to (*compiler_cache) if the compiler guaranteed it would keep using
      // the same location for the cache. That is not yet true for some
      // compilers, so instead we check whether compiler_cache still points at
      // the old cache and, if so, point it at the new cache with an atomic
      // compare&swap operation. (The old method always works; we should
      // switch to the new method (the commented line below) once the Intel
      // and Clang compilers use it.)
      (void)KMP_COMPARE_AND_STORE_PTR(tp_cache_addr->compiler_cache, old_cache,
                                      my_cache);
      // TCW_PTR(*(tp_cache_addr->compiler_cache), my_cache);

      // If the store doesn't happen here, the compiler's old behavior will
      // inevitably call __kmpc_threadprivate_cached with a new location for
      // the cache, and that function will store the resized cache there at
      // that point.

      // Nullify the old cache's data pointer so we skip it next time
      ptr->data = NULL;
    }
    ptr = ptr->next;
  }
  // After all caches are resized, update __kmp_tp_capacity to the new size
  *(volatile int *)&__kmp_tp_capacity = newCapacity;
}

/*!
 @ingroup THREADPRIVATE
 @param loc source location information
 @param data pointer to data being privatized
 @param ctor pointer to constructor function for data
 @param cctor pointer to copy constructor function for data
 @param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)
 Register vector constructors and destructors for thread private data.
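
 For example, for a threadprivate array of C++ objects the compiler emits a
 call of roughly this shape (a sketch only; `a`, `a_ctor_vec` and `a_dtor_vec`
 are hypothetical compiler-generated names, the copy-constructor slot is NULL
 per the USE_CHECKS_COMMON assertion in the body, and 10 stands for the
 vector length of a ten-element array):
 @code
 __kmpc_threadprivate_register_vec(&loc, a, a_ctor_vec, NULL, a_dtor_vec, 10);
 @endcode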
*/
void __kmpc_threadprivate_register_vec(ident_t *loc, void *data,
                                       kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                       kmpc_dtor_vec dtor,
                                       size_t vector_length) {
  struct shared_common *d_tn, **lnk_tn;

  KC_TRACE(10, ("__kmpc_threadprivate_register_vec: called\n"));

#ifdef USE_CHECKS_COMMON
  /* copy constructor must be zero for current code gen (Nov 2002 - jph) */
  KMP_ASSERT(cctor == 0);
#endif /* USE_CHECKS_COMMON */

  d_tn = __kmp_find_shared_task_common(
      &__kmp_threadprivate_d_table, -1,
      data); /* Only the global data table exists. */

  if (d_tn == 0) {
    d_tn = (struct shared_common *)__kmp_allocate(sizeof(struct shared_common));
    d_tn->gbl_addr = data;

    d_tn->ct.ctorv = ctor;
    d_tn->cct.cctorv = cctor;
    d_tn->dt.dtorv = dtor;
    d_tn->is_vec = TRUE;
    d_tn->vec_len = (size_t)vector_length;
    // d_tn->obj_init = 0; // AC: __kmp_allocate zeroes the memory
    // d_tn->pod_init = 0;
    lnk_tn = &(__kmp_threadprivate_d_table.data[KMP_HASH(data)]);

    d_tn->next = *lnk_tn;
    *lnk_tn = d_tn;
  }
}

void __kmp_cleanup_threadprivate_caches() {
  kmp_cached_addr_t *ptr = __kmp_threadpriv_cache_list;

  while (ptr) {
    void **cache = ptr->addr;
    __kmp_threadpriv_cache_list = ptr->next;
    if (*ptr->compiler_cache)
      *ptr->compiler_cache = NULL;
    ptr->compiler_cache = NULL;
    ptr->data = NULL;
    ptr->addr = NULL;
    ptr->next = NULL;
    // Threadprivate data pointed at by cache entries are destroyed at end of
    // __kmp_launch_thread with __kmp_common_destroy_gtid.
    __kmp_free(cache); // implicitly frees ptr too
    ptr = __kmp_threadpriv_cache_list;
  }
}